Compare commits

...

2 Commits

Author SHA1 Message Date
Thomas Boerger 4f436274cd Switched to 0.5 build process 9 years ago
Thomas Boerger 0039b2fe29 Added vendoring for reproducable builds 9 years ago
1. .drone.sec (1 line changed)
2. .drone.yml (29 lines changed)
3. .drone.yml.sig (1 line changed)
4. vendor.yml (9 lines changed)
5. vendor/github.com/aws/aws-sdk-go/Gemfile (6 lines changed)
6. vendor/github.com/aws/aws-sdk-go/LICENSE.txt (202 lines changed)
7. vendor/github.com/aws/aws-sdk-go/Makefile (152 lines changed)
8. vendor/github.com/aws/aws-sdk-go/NOTICE.txt (3 lines changed)
9. vendor/github.com/aws/aws-sdk-go/README.md (108 lines changed)
10. vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go (145 lines changed)
11. vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go (194 lines changed)
12. vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go (100 lines changed)
13. vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go (27 lines changed)
14. vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go (222 lines changed)
15. vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go (107 lines changed)
16. vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go (89 lines changed)
17. vendor/github.com/aws/aws-sdk-go/aws/client/client.go (120 lines changed)
18. vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go (90 lines changed)
19. vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go (12 lines changed)
20. vendor/github.com/aws/aws-sdk-go/aws/config.go (358 lines changed)
21. vendor/github.com/aws/aws-sdk-go/aws/convert_types.go (369 lines changed)
22. vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go (152 lines changed)
23. vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go (17 lines changed)
24. vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go (100 lines changed)
25. vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go (223 lines changed)
26. vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go (178 lines changed)
27. vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go (77 lines changed)
28. vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini (12 lines changed)
29. vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go (151 lines changed)
30. vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go (48 lines changed)
31. vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go (161 lines changed)
32. vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go (98 lines changed)
33. vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go (140 lines changed)
34. vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go (124 lines changed)
35. vendor/github.com/aws/aws-sdk-go/aws/errors.go (17 lines changed)
36. vendor/github.com/aws/aws-sdk-go/aws/logger.go (112 lines changed)
37. vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go (187 lines changed)
38. vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go (33 lines changed)
39. vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go (31 lines changed)
40. vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go (49 lines changed)
41. vendor/github.com/aws/aws-sdk-go/aws/request/request.go (329 lines changed)
42. vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go (104 lines changed)
43. vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go (101 lines changed)
44. vendor/github.com/aws/aws-sdk-go/aws/request/validation.go (234 lines changed)
45. vendor/github.com/aws/aws-sdk-go/aws/session/session.go (120 lines changed)
46. vendor/github.com/aws/aws-sdk-go/aws/types.go (106 lines changed)
47. vendor/github.com/aws/aws-sdk-go/aws/version.go (8 lines changed)
48. vendor/github.com/aws/aws-sdk-go/private/README.md (4 lines changed)
49. vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go (65 lines changed)
50. vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json (75 lines changed)
51. vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go (88 lines changed)
52. vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go (75 lines changed)
53. vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go (36 lines changed)
54. vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go (230 lines changed)
55. vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go (35 lines changed)
56. vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go (66 lines changed)
57. vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go (256 lines changed)
58. vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go (45 lines changed)
59. vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go (198 lines changed)
60. vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go (21 lines changed)
61. vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go (293 lines changed)
62. vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go (260 lines changed)
63. vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go (105 lines changed)
64. vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go (82 lines changed)
65. vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go (465 lines changed)
66. vendor/github.com/aws/aws-sdk-go/sdk.go (7 lines changed)
67. vendor/github.com/aws/aws-sdk-go/service/generate.go (5 lines changed)
68. vendor/github.com/aws/aws-sdk-go/service/sts/api.go (1478 lines changed)
69. vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go (12 lines changed)
70. vendor/github.com/aws/aws-sdk-go/service/sts/service.go (130 lines changed)
71. vendor/github.com/drone/drone-plugin-go/LICENSE (202 lines changed)
72. vendor/github.com/drone/drone-plugin-go/README.md (103 lines changed)
73. vendor/github.com/drone/drone-plugin-go/plugin/const.go (17 lines changed)
74. vendor/github.com/drone/drone-plugin-go/plugin/param.go (131 lines changed)
75. vendor/github.com/drone/drone-plugin-go/plugin/types.go (97 lines changed)
76. vendor/github.com/go-ini/ini/LICENSE (191 lines changed)
77. vendor/github.com/go-ini/ini/Makefile (12 lines changed)
78. vendor/github.com/go-ini/ini/README.md (638 lines changed)
79. vendor/github.com/go-ini/ini/README_ZH.md (625 lines changed)
80. vendor/github.com/go-ini/ini/ini.go (465 lines changed)
81. vendor/github.com/go-ini/ini/key.go (616 lines changed)
82. vendor/github.com/go-ini/ini/parser.go (312 lines changed)
83. vendor/github.com/go-ini/ini/section.go (197 lines changed)
84. vendor/github.com/go-ini/ini/struct.go (351 lines changed)
85. vendor/github.com/jmespath/go-jmespath/LICENSE (13 lines changed)
86. vendor/github.com/jmespath/go-jmespath/Makefile (44 lines changed)
87. vendor/github.com/jmespath/go-jmespath/README.md (7 lines changed)
88. vendor/github.com/jmespath/go-jmespath/api.go (49 lines changed)
89. vendor/github.com/jmespath/go-jmespath/astnodetype_string.go (16 lines changed)
90. vendor/github.com/jmespath/go-jmespath/functions.go (842 lines changed)
91. vendor/github.com/jmespath/go-jmespath/interpreter.go (418 lines changed)
92. vendor/github.com/jmespath/go-jmespath/lexer.go (420 lines changed)
93. vendor/github.com/jmespath/go-jmespath/parser.go (603 lines changed)
94. vendor/github.com/jmespath/go-jmespath/toktype_string.go (16 lines changed)
95. vendor/github.com/jmespath/go-jmespath/util.go (185 lines changed)

1
.drone.sec

@@ -1 +0,0 @@
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.WoRVIb7SPdwybHIj2HNXmcPfPRWphFCYCxNlyzYohkHRywaT9JVLZ3ONWwM5KY0GLYSwfOuiZCi9QmFoiq8IIWaxQ9J-zly4MpX2B6TatTIUTmySyPMfmc0ESkV1pkR020CVWEd37SBAfDng4kX-34fLxrKLeo4tubJZ11wtfqRhHJp2uIpcLhAkKM4TIUKCSHwiEbwazD-o9xfDE6_N7zKz7jHPLy6Ei5sBfy_elWmBcu9KFj0BviVP1LoULnQhArQE5AvdIsLK5lqKgxn7tsHSxb_Y5VwcgzxnBwPmOcvhQeTpsPiKJeMbZ-JY0e3XRu_neZ360PEbQRveYlpzjA.WQRLA_X14KUYTi0n.MlsWQJsDQva7hPjcGsLMr6E8X-m93rnUl6g4hjyH3GibaiE8wrvhpT1i7PbK0yXhq12V0cUtCpq8EEYrv6n8qQtBKmC9QKZmaB8ozvDuhuKxTpstp6pPlBJKswW56oiq9f5uDFLifEHpcdzyPTZp9d_Ty-vXto2sCkFEHUZIvlqUbk27nXAcLbPs9x4JePam4k5bCE775Bc9LSiAG_xyfTKq72Dq1Q_U3xqJxIcKlbFfvHw2SObCzRT-WP_evOtkLwfCU5AcH5R9crVjqmY94TjKfdDkXjbr6SUSG5c5XGkrnxmqLEueE3YDGTyQ1l1jhvrK6hNDk_h_1sF-5SCYY4yI0Ni2WBqLOKS3VL2PE7yGkAYhJmusDvlTmZLo2PMUV3i0EBbW5eYr2k2QLDEwwh7yjcvKRRMS1vMEMXMf1VI.kxogGcKgvOYmZW4gdNdirw

29
.drone.yml

@@ -1,33 +1,20 @@
build:
image: golang:1.5
pipeline:
test:
image: golang:1.6
environment:
- CGO_ENABLED=0
- GOPATH=/drone
commands:
- make deps
- make vet
- make build
- make test
publish:
coverage:
when:
branch: master
docker:
username: $$DOCKER_USER
password: $$DOCKER_PASS
email: $$DOCKER_EMAIL
repo: plugins/drone-terraform
tag: latest
when:
branch: master
docker:
username: $$DOCKER_USER
password: $$DOCKER_PASS
email: $$DOCKER_EMAIL
storage_driver: overlay
repo: plugins/drone-terraform
tag: develop
tag: [ "latest" ]
when:
branch: develop
branch: release/0.4
event: push
plugin:
name: Terraform

1
.drone.yml.sig

@@ -0,0 +1 @@
eyJhbGciOiJIUzI1NiJ9.cGlwZWxpbmU6CiAgdGVzdDoKICAgIGltYWdlOiBnb2xhbmc6MS42CiAgICBlbnZpcm9ubWVudDoKICAgICAgLSBDR09fRU5BQkxFRD0wCiAgICAgIC0gR09QQVRIPS9kcm9uZQogICAgY29tbWFuZHM6CiAgICAgIC0gbWFrZSB2ZXQKICAgICAgLSBtYWtlIGJ1aWxkCiAgICAgIC0gbWFrZSB0ZXN0CiAgZG9ja2VyOgogICAgc3RvcmFnZV9kcml2ZXI6IG92ZXJsYXkKICAgIHJlcG86IHBsdWdpbnMvZHJvbmUtdGVycmFmb3JtCiAgICB0YWc6IFsgImxhdGVzdCIgXQogICAgd2hlbjoKICAgICAgYnJhbmNoOiByZWxlYXNlLzAuNAogICAgICBldmVudDogcHVzaAoKcGx1Z2luOgogIG5hbWU6IFRlcnJhZm9ybQogIGRlc2M6IEV4ZWN1dGUgVGVycmFmb3JtIHBsYW4gYW5kIGFwcGx5CiAgdHlwZTogZGVwbG95CiAgaW1hZ2U6IHBsdWdpbnMvZHJvbmUtdGVycmFmb3JtCiAgbGFiZWxzOgogICAgLSB0ZXJyYWZvcm0K.EipgzT0eVf9Hxjc3O8r6cZwLpWFuCGRGKHvDOFEraXY
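
The signature added above appears to be a compact JWS token: a base64url-encoded header ({"alg":"HS256"}), a base64url-encoded payload carrying the new .drone.yml contents, and an HMAC-SHA256 signature, separated by dots. As a minimal sketch, the payload can be inspected as shown below; verifying the HMAC itself would additionally require the repository's shared secret, which is not part of this commit.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	raw, err := ioutil.ReadFile(".drone.yml.sig")
	if err != nil {
		panic(err)
	}

	// The token has the form header.payload.signature; the payload is
	// the pipeline YAML that was signed.
	parts := strings.Split(strings.TrimSpace(string(raw)), ".")
	if len(parts) != 3 {
		panic("unexpected signature format")
	}

	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // prints the signed .drone.yml content
}
```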

9
vendor.yml

@@ -0,0 +1,9 @@
vendors:
- path: github.com/aws/aws-sdk-go
rev: d3b6d25135b4e09ecd39ea3c2a5cad35279b0515
- path: github.com/drone/drone-plugin-go
rev: d6109f644c5935c22620081b4c234bb2263743c7
- path: github.com/go-ini/ini
rev: 72ba3e6b9e6b87e0c74c9a7a4dc86e8dd8ba4355
- path: github.com/jmespath/go-jmespath
rev: 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74

6
vendor/github.com/aws/aws-sdk-go/Gemfile

@@ -0,0 +1,6 @@
source 'https://rubygems.org'
gem 'yard', git: 'git://github.com/lsegal/yard', ref: '5025564a491e1b7c6192632cba2802202ca08449'
gem 'yard-go', git: 'git://github.com/jasdel/yard-go', ref: 'e78e1ef7cdf5e0f3266845b26bb4fd64f1dd6f85'
gem 'rdiscount'

202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

152
vendor/github.com/aws/aws-sdk-go/Makefile

@@ -0,0 +1,152 @@
LINTIGNOREDOT='awstesting/integration.+should not use dot imports'
LINTIGNOREDOC='service/[^/]+/(api|service|waiters)\.go:.+(comment on exported|should have comment or be unexported)'
LINTIGNORECONST='service/[^/]+/(api|service|waiters)\.go:.+(type|struct field|const|func) ([^ ]+) should be ([^ ]+)'
LINTIGNORESTUTTER='service/[^/]+/(api|service)\.go:.+(and that stutters)'
LINTIGNOREINFLECT='service/[^/]+/(api|service)\.go:.+method .+ should be '
LINTIGNOREINFLECTS3UPLOAD='service/s3/s3manager/upload\.go:.+struct field SSEKMSKeyId should be '
LINTIGNOREDEPS='vendor/.+\.go'
SDK_WITH_VENDOR_PKGS=$(shell go list ./... | grep -v "/vendor/src")
SDK_ONLY_PKGS=$(shell go list ./... | grep -v "/vendor/")
SDK_GO_1_4=$(shell go version | grep "go1.4")
SDK_GO_VERSION=$(shell go version | awk '''{print $$3}''' | tr -d '''\n''')
all: get-deps generate unit
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " api_info to print a list of services and versions"
@echo " docs to build SDK documentation"
@echo " build to go build the SDK"
@echo " unit to run unit tests"
@echo " integration to run integration tests"
@echo " performance to run performance tests"
@echo " verify to verify tests"
@echo " lint to lint the SDK"
@echo " vet to vet the SDK"
@echo " generate to go generate and make services"
@echo " gen-test to generate protocol tests"
@echo " gen-services to generate services"
@echo " get-deps to go get the SDK dependencies"
@echo " get-deps-tests to get the SDK's test dependencies"
@echo " get-deps-verify to get the SDK's verification dependencies"
generate: gen-test gen-endpoints gen-services
gen-test: gen-protocol-test
gen-services:
go generate ./service
gen-protocol-test:
go generate ./private/protocol/...
gen-endpoints:
go generate ./private/endpoints
build:
@echo "go build SDK and vendor packages"
@go build ${SDK_ONLY_PKGS}
unit: get-deps-tests build verify
@echo "go test SDK and vendor packages"
@go test $(SDK_ONLY_PKGS)
unit-with-race-cover: get-deps-tests build verify
@echo "go test SDK and vendor packages"
@go test -race -cpu=1,2,4 $(SDK_ONLY_PKGS)
integration: get-deps-tests integ-custom smoke-tests performance
integ-custom:
go test -tags=integration ./awstesting/integration/customizations/...
smoke-tests: get-deps-tests
gucumber ./awstesting/integration/smoke
performance: get-deps-tests
AWS_TESTING_LOG_RESULTS=${log-detailed} AWS_TESTING_REGION=$(region) AWS_TESTING_DB_TABLE=$(table) gucumber ./awstesting/performance
sandbox-tests: sandbox-test-go14 sandbox-test-go15 sandbox-test-go15-novendorexp sandbox-test-go16 sandbox-test-go17 sandbox-test-gotip
sandbox-test-go14:
docker build -f ./awstesting/sandbox/Dockerfile.test.go1.4 -t "aws-sdk-go-1.4" .
docker run -t aws-sdk-go-1.4
sandbox-test-go15:
docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5 -t "aws-sdk-go-1.5" .
docker run -t aws-sdk-go-1.5
sandbox-test-go15-novendorexp:
docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5-novendorexp -t "aws-sdk-go-1.5-novendorexp" .
docker run -t aws-sdk-go-1.5-novendorexp
sandbox-test-go16:
docker build -f ./awstesting/sandbox/Dockerfile.test.go1.6 -t "aws-sdk-go-1.6" .
docker run -t aws-sdk-go-1.6
sandbox-test-go17:
docker build -f ./awstesting/sandbox/Dockerfile.test.go1.7 -t "aws-sdk-go-1.7" .
docker run -t aws-sdk-go-1.6
sandbox-test-gotip:
@echo "Run make update-aws-golang-tip, if this test fails because missing aws-golang:tip container"
docker build -f ./awstesting/sandbox/Dockerfile.test.gotip -t "aws-sdk-go-tip" .
docker run -t aws-sdk-go-tip
update-aws-golang-tip:
docker build -f ./awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" .
verify: get-deps-verify lint vet
lint:
@echo "go lint SDK and vendor packages"
@lint=`if [ -z "${SDK_GO_1_4}" ]; then golint ./...; else echo "skipping golint"; fi`; \
lint=`echo "$$lint" | grep -E -v -e ${LINTIGNOREDOT} -e ${LINTIGNOREDOC} -e ${LINTIGNORECONST} -e ${LINTIGNORESTUTTER} -e ${LINTIGNOREINFLECT} -e ${LINTIGNOREDEPS} -e ${LINTIGNOREINFLECTS3UPLOAD}`; \
echo "$$lint"; \
if [ "$$lint" != "" ] && [ "$$lint" != "skipping golint" ]; then exit 1; fi
SDK_BASE_FOLDERS=$(shell ls -d */ | grep -v vendor | grep -v awsmigrate)
ifneq (,$(findstring go1.5, ${SDK_GO_VERSION}))
GO_VET_CMD=go tool vet --all -shadow
else ifneq (,$(findstring go1.6, ${SDK_GO_VERSION}))
GO_VET_CMD=go tool vet --all -shadow -example=false
else ifneq (,$(findstring devel, ${SDK_GO_VERSION}))
GO_VET_CMD=go tool vet --all -shadow -tests=false
else
GO_VET_CMD=echo skipping go vet, ${SDK_GO_VERSION}
endif
vet:
${GO_VET_CMD} ${SDK_BASE_FOLDERS}
get-deps: get-deps-tests get-deps-verify
@echo "go get SDK dependencies"
@go get -v $(SDK_ONLY_PKGS)
get-deps-tests:
@echo "go get SDK testing dependencies"
go get github.com/lsegal/gucumber/cmd/gucumber
go get github.com/stretchr/testify
go get github.com/smartystreets/goconvey
get-deps-verify:
@echo "go get SDK verification utilities"
@if [ -z "${SDK_GO_1_4}" ]; then go get github.com/golang/lint/golint; else echo "skipped getting golint"; fi
bench:
@echo "go bench SDK packages"
@go test -run NONE -bench . -benchmem -tags 'bench' $(SDK_ONLY_PKGS)
bench-protocol:
@echo "go bench SDK protocol marshallers"
@go test -run NONE -bench . -benchmem -tags 'bench' ./private/protocol/...
docs:
@echo "generate SDK docs"
rm -rf doc && bundle install && bundle exec yard
@# This env variable, DOCS, is for internal use
@if [ -n "$(AWS_DOC_GEN_TOOL)" ]; then echo "For internal use. Subject to change."; $(AWS_DOC_GEN_TOOL) `pwd`; fi
api_info:
@go run private/model/cli/api-info/api-info.go

3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt

@@ -0,0 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.

108
vendor/github.com/aws/aws-sdk-go/README.md

@@ -0,0 +1,108 @@
# AWS SDK for Go
<span style="display: inline-block;">
[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api)
[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go)
[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
</span>
aws-sdk-go is the official AWS SDK for the Go programming language.
Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK.
## Installing
If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or 1.6 and higher you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder.
go get -u github.com/aws/aws-sdk-go
Otherwise if your Go environment does not have vendoring support enabled, or you do not want to include the vendored SDK's dependencies you can use the following command to retrieve the SDK and its non-testing dependencies using `go get`.
go get -u github.com/aws/aws-sdk-go/aws/...
go get -u github.com/aws/aws-sdk-go/service/...
If you're looking to retrieve just the SDK without any dependencies use the following command.
go get -d github.com/aws/aws-sdk-go/
These two processes will still include the `vendor` folder and it should be deleted if its not going to be used by your environment.
rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor
## Configuring Credentials
Before using the SDK, ensure that you've configured credentials. The best
way to configure credentials on a development machine is to use the
`~/.aws/credentials` file, which might look like:
```
[default]
aws_access_key_id = AKID1234567890
aws_secret_access_key = MY-SECRET-KEY
```
You can learn more about the credentials file from this
[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs).
Alternatively, you can set the following environment variables:
```
AWS_ACCESS_KEY_ID=AKID1234567890
AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY
```
### AWS CLI config file (`~/.aws/config`)
The AWS SDK for Go does not support the AWS CLI's config file. The SDK will not use any contents from this file. The SDK only supports the shared credentials file (`~/.aws/credentials`). #384 tracks this feature request discussion.
## Using the Go SDK
To use a service in the SDK, create a service variable by calling the `New()`
function. Once you have a service client, you can call API operations which each
return response data and a possible error.
To list a set of instance IDs from EC2, you could run:
```go
package main
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
)
func main() {
// Create an EC2 service object in the "us-west-2" region
// Note that you can also configure your region globally by
// exporting the AWS_REGION environment variable
svc := ec2.New(session.New(), &aws.Config{Region: aws.String("us-west-2")})
// Call the DescribeInstances Operation
resp, err := svc.DescribeInstances(nil)
if err != nil {
panic(err)
}
// resp has all of the response data, pull out instance IDs:
fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
for idx, res := range resp.Reservations {
fmt.Println(" > Number of instances: ", len(res.Instances))
for _, inst := range resp.Reservations[idx].Instances {
fmt.Println(" - Instance ID: ", *inst.InstanceId)
}
}
}
```
You can find more information and operations in our
[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/).
## License
This SDK is distributed under the
[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0),
see LICENSE.txt and NOTICE.txt for more information.
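
As a small, hedged supplement to the vendored README above: the same client setup can also be given credentials explicitly through the vendored credentials package, instead of the shared credentials file or environment variables. The key values below are placeholders and error handling is kept minimal.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Static credentials are handy for local experiments; prefer the
	// shared credentials file or environment variables otherwise.
	creds := credentials.NewStaticCredentials("AKID1234567890", "MY-SECRET-KEY", "")

	sess := session.New(&aws.Config{
		Region:      aws.String("us-west-2"),
		Credentials: creds,
	})

	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println("using access key:", v.AccessKeyID)

	_ = sess // pass sess to a service constructor, e.g. ec2.New(sess)
}
```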

145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go

@@ -0,0 +1,145 @@
// Package awserr represents API error interface accessors for the SDK.
package awserr
// An Error wraps lower level errors with code, message and an original error.
// The underlying concrete error type may also satisfy other interfaces which
// can be to used to obtain more specific information about the error.
//
// Calling Error() or String() will always include the full information about
// an error based on its underlying type.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Get error details
// log.Println("Error:", awsErr.Code(), awsErr.Message())
//
// // Prints out full error message, including original error if there was one.
// log.Println("Error:", awsErr.Error())
//
// // Get original error
// if origErr := awsErr.OrigErr(); origErr != nil {
// // operate on original error.
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type Error interface {
// Satisfy the generic error interface.
error
// Returns the short phrase depicting the classification of the error.
Code() string
// Returns the error details message.
Message() string
// Returns the original error if one was set. Nil is returned if not set.
OrigErr() error
}
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occured in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
// Satisfy the generic error interface.
error
// Returns the short phrase depicting the classification of the error.
Code() string
// Returns the error details message.
Message() string
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occured in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
// Satisfy the base Error interface.
Error
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// New returns an Error object described by the code, message, and origErr.
//
// If origErr satisfies the Error interface it will not be wrapped within a new
// Error object and will instead be returned.
func New(code, message string, origErr error) Error {
var errs []error
if origErr != nil {
errs = append(errs, origErr)
}
return newBaseError(code, message, errs)
}
// NewBatchError returns an BatchedErrors with a collection of errors as an
// array of errors.
func NewBatchError(code, message string, errs []error) BatchedErrors {
return newBaseError(code, message, errs)
}
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service such as a connection error.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if reqerr, ok := err.(RequestFailure); ok {
// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
// } else {
// log.Println("Error:", err.Error())
// }
// }
//
// Combined with awserr.Error:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Generic AWS Error with Code, Message, and original error (if any)
// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
// if reqErr, ok := err.(awserr.RequestFailure); ok {
// // A service error occurred
// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type RequestFailure interface {
Error
// The status code of the HTTP response.
StatusCode() int
// The request ID returned by the service for a request failure. This will
// be empty if no request ID is available such as the request failed due
// to a connection error.
RequestID() string
}
// NewRequestFailure returns a new request error wrapper for the given Error
// provided.
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
return newRequestError(err, statusCode, reqID)
}
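
The comments above show how callers unwrap these errors; for completeness, here is a small sketch of producing them, as a service client would. The code, message, and request ID values are invented for illustration.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a low-level failure with a classification code and message.
	base := awserr.New("NoSuchBucket", "the specified bucket does not exist",
		errors.New("lookup failed"))

	// Add HTTP status and request ID details on top of the base error.
	reqFail := awserr.NewRequestFailure(base, 404, "EXAMPLE-REQUEST-ID")

	fmt.Println(reqFail.Code())       // NoSuchBucket
	fmt.Println(reqFail.StatusCode()) // 404
	fmt.Println(reqFail.Error())      // full message, including "caused by: lookup failed"
}
```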

194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go

@@ -0,0 +1,194 @@
package awserr
import "fmt"
// SprintError returns a string of the formatted error code.
//
// Both extra and origErr are optional. If they are included their lines
// will be added, but if they are not included their lines will be ignored.
func SprintError(code, message, extra string, origErr error) string {
msg := fmt.Sprintf("%s: %s", code, message)
if extra != "" {
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
}
if origErr != nil {
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
}
return msg
}
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
// Classification of error
code string
// Detailed information about error
message string
// Optional original error this error is based off of. Allows building
// chained errors.
errs []error
}
// newBaseError returns an error object for the code, message, and errors.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the
// error.
//
// origErrs is the error objects which will be nested under the new errors to
// be returned.
func newBaseError(code, message string, origErrs []error) *baseError {
b := &baseError{
code: code,
message: message,
errs: origErrs,
}
return b
}
// Error returns the string representation of the error.
//
// See ErrorWithExtra for formatting.
//
// Satisfies the error interface.
func (b baseError) Error() string {
size := len(b.errs)
if size > 0 {
return SprintError(b.code, b.message, "", errorList(b.errs))
}
return SprintError(b.code, b.message, "", nil)
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
return b.Error()
}
// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
return b.code
}
// Message returns the error details message.
func (b baseError) Message() string {
return b.message
}
// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
func (b baseError) OrigErr() error {
switch len(b.errs) {
case 0:
return nil
case 1:
return b.errs[0]
default:
if err, ok := b.errs[0].(Error); ok {
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
}
return NewBatchError("BatchedErrors",
"multiple errors occured", b.errs)
}
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (b baseError) OrigErrs() []error {
return b.errs
}
// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError Error
// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
awsError
statusCode int
requestID string
}
// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
return &requestError{
awsError: err,
statusCode: statusCode,
requestID: requestID,
}
}
// Error returns the string representation of the error.
// Satisfies the error interface.
func (r requestError) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s",
r.statusCode, r.requestID)
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
return r.Error()
}
// StatusCode returns the wrapped status code for the error
func (r requestError) StatusCode() int {
return r.statusCode
}
// RequestID returns the wrapped requestID
func (r requestError) RequestID() string {
return r.requestID
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (r requestError) OrigErrs() []error {
if b, ok := r.awsError.(BatchedErrors); ok {
return b.OrigErrs()
}
return []error{r.OrigErr()}
}
// An error list that satisfies the golang interface
type errorList []error
// Error returns the string representation of the error.
//
// Satisfies the error interface.
func (e errorList) Error() string {
msg := ""
// How do we want to handle the array size being zero
if size := len(e); size > 0 {
for i := 0; i < size; i++ {
msg += fmt.Sprintf("%s", e[i].Error())
// We check the next index to see if it is within the slice.
// If it is, then we append a newline. We do this, because unit tests
// could be broken with the additional '\n'
if i+1 < size {
msg += "\n"
}
}
}
return msg
}
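
To make the layout of SprintError concrete, here is a short sketch with every argument populated; the expected output follows directly from the formatting code above and is shown in the comments.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	msg := awserr.SprintError(
		"Throttling",                         // code
		"rate exceeded",                      // message
		"status code: 400, request id: none", // extra
		errors.New("too many requests"),      // origErr
	)
	fmt.Println(msg)
	// Throttling: rate exceeded
	//	status code: 400, request id: none
	// caused by: too many requests
}
```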

100
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go

@@ -0,0 +1,100 @@
package awsutil
import (
"io"
"reflect"
)
// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
func Copy(dst, src interface{}) {
dstval := reflect.ValueOf(dst)
if !dstval.IsValid() {
panic("Copy dst cannot be nil")
}
rcopy(dstval, reflect.ValueOf(src), true)
}
// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail.
func CopyOf(src interface{}) (dst interface{}) {
dsti := reflect.New(reflect.TypeOf(src).Elem())
dst = dsti.Interface()
rcopy(dsti, reflect.ValueOf(src), true)
return
}
// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of a object.
func rcopy(dst, src reflect.Value, root bool) {
if !src.IsValid() {
return
}
switch src.Kind() {
case reflect.Ptr:
if _, ok := src.Interface().(io.Reader); ok {
if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
dst.Elem().Set(src)
} else if dst.CanSet() {
dst.Set(src)
}
} else {
e := src.Type().Elem()
if dst.CanSet() && !src.IsNil() {
dst.Set(reflect.New(e))
}
if src.Elem().IsValid() {
// Keep the current root state since the depth hasn't changed
rcopy(dst.Elem(), src.Elem(), root)
}
}
case reflect.Struct:
t := dst.Type()
for i := 0; i < t.NumField(); i++ {
name := t.Field(i).Name
srcVal := src.FieldByName(name)
dstVal := dst.FieldByName(name)
if srcVal.IsValid() && dstVal.CanSet() {
rcopy(dstVal, srcVal, false)
}
}
case reflect.Slice:
if src.IsNil() {
break
}
s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
dst.Set(s)
for i := 0; i < src.Len(); i++ {
rcopy(dst.Index(i), src.Index(i), false)
}
case reflect.Map:
if src.IsNil() {
break
}
s := reflect.MakeMap(src.Type())
dst.Set(s)
for _, k := range src.MapKeys() {
v := src.MapIndex(k)
v2 := reflect.New(v.Type()).Elem()
rcopy(v2, v, false)
dst.SetMapIndex(k, v2)
}
default:
// Assign the value if possible. If its not assignable, the value would
// need to be converted and the impact of that may be unexpected, or is
// not compatible with the dst type.
if src.Type().AssignableTo(dst.Type()) {
dst.Set(src)
}
}
}
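
A brief usage sketch for Copy and CopyOf; the Item type is a made-up stand-in for the request and response structs the package comment mentions.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Item struct {
	Name *string
	Tags []string
}

func main() {
	name := "original"
	src := &Item{Name: &name, Tags: []string{"a", "b"}}

	// Deep-copy into an existing destination; dst gets its own *string.
	var dst Item
	awsutil.Copy(&dst, src)

	// CopyOf allocates the destination; src must be a pointer type.
	clone := awsutil.CopyOf(src).(*Item)
	*clone.Name = "changed"

	fmt.Println(*src.Name, *dst.Name, *clone.Name) // original original changed
}
```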

27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go

@@ -0,0 +1,27 @@
package awsutil
import (
"reflect"
)
// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
ra := reflect.Indirect(reflect.ValueOf(a))
rb := reflect.Indirect(reflect.ValueOf(b))
if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
// If the elements are both nil, and of the same type the are equal
// If they are of different types they are not equal
return reflect.TypeOf(a) == reflect.TypeOf(b)
} else if raValid != rbValid {
// Both values must be valid to be equal
return false
}
return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
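
A short illustration of the dereferencing behaviour described above; aws.String and aws.Int64 are the pointer helpers from the vendored convert_types.go.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	// The pointer operand is dereferenced before comparison.
	fmt.Println(awsutil.DeepEqual(aws.String("hello"), "hello")) // true
	fmt.Println(awsutil.DeepEqual(aws.Int64(42), aws.Int64(42))) // true

	// A nil pointer on only one side is never equal to a value.
	var missing *string
	fmt.Println(awsutil.DeepEqual(missing, "hello")) // false
}
```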

222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go

@@ -0,0 +1,222 @@
package awsutil
import (
"reflect"
"regexp"
"strconv"
"strings"
"github.com/jmespath/go-jmespath"
)
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
// rValuesAtPath returns a slice of values found in value v. The values
// in v are explored recursively so all nested values are collected.
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
pathparts := strings.Split(path, "||")
if len(pathparts) > 1 {
for _, pathpart := range pathparts {
vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
if len(vals) > 0 {
return vals
}
}
return nil
}
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
components := strings.Split(path, ".")
for len(values) > 0 && len(components) > 0 {
var index *int64
var indexStar bool
c := strings.TrimSpace(components[0])
if c == "" { // no actual component, illegal syntax
return nil
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
// TODO normalize case for user
return nil // don't support unexported fields
}
// parse this component
if m := indexRe.FindStringSubmatch(c); m != nil {
c = m[1]
if m[2] == "" {
index = nil
indexStar = true
} else {
i, _ := strconv.ParseInt(m[2], 10, 32)
index = &i
indexStar = false
}
}
nextvals := []reflect.Value{}
for _, value := range values {
// pull component name out of struct member
if value.Kind() != reflect.Struct {
continue
}
if c == "*" { // pull all members
for i := 0; i < value.NumField(); i++ {
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
nextvals = append(nextvals, f)
}
}
continue
}
value = value.FieldByNameFunc(func(name string) bool {
if c == name {
return true
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
return true
}
return false
})
if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
if !value.IsNil() {
value.Set(reflect.Zero(value.Type()))
}
return []reflect.Value{value}
}
if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
// TODO if the value is the terminus it should not be created
// if the value to be set to its position is nil.
value.Set(reflect.New(value.Type().Elem()))
value = value.Elem()
} else {
value = reflect.Indirect(value)
}
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !createPath && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
if indexStar || index != nil {
nextvals = []reflect.Value{}
for _, value := range values {
value := reflect.Indirect(value)
if value.Kind() != reflect.Slice {
continue
}
if indexStar { // grab all indices
for i := 0; i < value.Len(); i++ {
idx := reflect.Indirect(value.Index(i))
if idx.IsValid() {
nextvals = append(nextvals, idx)
}
}
continue
}
// pull out index
i := int(*index)
if i >= value.Len() { // check out of bounds
if createPath {
// TODO resize slice
} else {
continue
}
} else if i < 0 { // support negative indexing
i = value.Len() + i
}
value = reflect.Indirect(value.Index(i))
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !createPath && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
}
components = components[1:]
}
return values
}
// ValuesAtPath returns a list of values at the case insensitive lexical
// path inside of a structure.
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
result, err := jmespath.Search(path, i)
if err != nil {
return nil, err
}
v := reflect.ValueOf(result)
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, nil
}
if s, ok := result.([]interface{}); ok {
return s, err
}
if v.Kind() == reflect.Map && v.Len() == 0 {
return nil, nil
}
if v.Kind() == reflect.Slice {
out := make([]interface{}, v.Len())
for i := 0; i < v.Len(); i++ {
out[i] = v.Index(i).Interface()
}
return out, nil
}
return []interface{}{result}, nil
}
// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
func SetValueAtPath(i interface{}, path string, v interface{}) {
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
}
}
}
func setValue(dstVal reflect.Value, src interface{}) {
if dstVal.Kind() == reflect.Ptr {
dstVal = reflect.Indirect(dstVal)
}
srcVal := reflect.ValueOf(src)
if !srcVal.IsValid() { // src is literal nil
if dstVal.CanAddr() {
// Convert to pointer so that pointer's value can be nil'ed
// dstVal = dstVal.Addr()
}
dstVal.Set(reflect.Zero(dstVal.Type()))
} else if srcVal.Kind() == reflect.Ptr {
if srcVal.IsNil() {
srcVal = reflect.Zero(dstVal.Type())
} else {
srcVal = reflect.ValueOf(src).Elem()
}
dstVal.Set(srcVal)
} else {
dstVal.Set(srcVal)
}
}
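
A small sketch of SetValueAtPath; the Person and Address types are invented here just to show case-insensitive paths and the automatic allocation of nil pointers along the way.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Address struct {
	City *string
}

type Person struct {
	Name      *string
	Addresses []*Address
}

func main() {
	city := "Seattle"
	p := &Person{Addresses: []*Address{{City: &city}}}

	// Paths match field names case-insensitively; the nil *string for
	// Name is allocated before the value is assigned.
	awsutil.SetValueAtPath(p, "name", "Alice")
	awsutil.SetValueAtPath(p, "addresses[0].city", "Portland")

	fmt.Println(*p.Name, *p.Addresses[0].City) // Alice Portland
}
```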

107
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go

@@ -0,0 +1,107 @@
package awsutil
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
)
// Prettify returns the string representation of a value.
func Prettify(i interface{}) string {
var buf bytes.Buffer
prettify(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
// prettify will recursively walk value v to build a textual
// representation of the value.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
strtype := v.Type().String()
if strtype == "time.Time" {
fmt.Fprintf(buf, "%s", v.Interface())
break
} else if strings.HasPrefix(strtype, "io.") {
buf.WriteString("<buffer>")
break
}
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
prettify(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
prettify(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
prettify(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
if !v.IsValid() {
fmt.Fprint(buf, "<invalid value>")
return
}
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
case io.ReadSeeker, io.Reader:
format = "buffer(%p)"
}
fmt.Fprintf(buf, format, v.Interface())
}
}
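
To show the shape of Prettify's output, a quick sketch; the Filter type and its values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Filter struct {
	Name   *string
	Values []*string
}

func main() {
	f := &Filter{
		Name:   aws.String("instance-state-name"),
		Values: []*string{aws.String("running")},
	}
	fmt.Println(awsutil.Prettify(f))
	// {
	//   Name: "instance-state-name",
	//   Values: ["running"]
	// }
}
```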

89
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go

@@ -0,0 +1,89 @@
package awsutil
import (
"bytes"
"fmt"
"reflect"
"strings"
)
// StringValue returns the string representation of a value.
func StringValue(i interface{}) string {
var buf bytes.Buffer
stringValue(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
stringValue(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
stringValue(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
stringValue(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
}
fmt.Fprintf(buf, format, v.Interface())
}
}

120
vendor/github.com/aws/aws-sdk-go/aws/client/client.go

@@ -0,0 +1,120 @@
package client
import (
"fmt"
"io/ioutil"
"net/http/httputil"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
)
// A Config provides configuration to a service client instance.
type Config struct {
Config *aws.Config
Handlers request.Handlers
Endpoint, SigningRegion string
}
// ConfigProvider provides a generic way for a service client to receive
// the ClientConfig without circular dependencies.
type ConfigProvider interface {
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
}
// A Client implements the base client request and response handling
// used by all service clients.
type Client struct {
request.Retryer
metadata.ClientInfo
Config aws.Config
Handlers request.Handlers
}
// New will return a pointer to a new initialized service client.
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
svc := &Client{
Config: cfg,
ClientInfo: info,
Handlers: handlers,
}
switch retryer, ok := cfg.Retryer.(request.Retryer); {
case ok:
svc.Retryer = retryer
case cfg.Retryer != nil && cfg.Logger != nil:
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
cfg.Logger.Log(s)
fallthrough
default:
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
}
svc.AddDebugHandlers()
for _, option := range options {
option(svc)
}
return svc
}
// NewRequest returns a new Request pointer for the service API
// operation and parameters.
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
}
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
return
}
c.Handlers.Send.PushFront(logRequest)
c.Handlers.Send.PushBack(logResponse)
}
const logReqMsg = `DEBUG: Request %s/%s Details:
---[ REQUEST POST-SIGN ]-----------------------------
%s
-----------------------------------------------------`
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if logBody {
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
r.Body.Seek(r.BodyStart, 0)
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
}
const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`
func logResponse(r *request.Request) {
var msg = "no response data"
if r.HTTPResponse != nil {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
msg = string(dumpedBody)
} else if r.Error != nil {
msg = r.Error.Error()
}
r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
}
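
The debug handlers above are only attached when the configured log level is at least LogDebug. A hedged sketch of turning on full request/response dumps for the vendored STS client; region choice, credentials, and error handling are kept minimal.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// LogDebug enables the request/response dump handlers; using
	// LogDebugWithHTTPBody includes the bodies as well.
	level := aws.LogDebugWithHTTPBody
	sess := session.New(&aws.Config{
		Region:   aws.String("us-east-1"),
		LogLevel: &level,
	})

	svc := sts.New(sess)
	if _, err := svc.GetSessionToken(&sts.GetSessionTokenInput{}); err != nil {
		// Valid credentials are assumed; see the README section above.
		panic(err)
	}
}
```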

90
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go

@@ -0,0 +1,90 @@
package client
import (
"math/rand"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/request"
)
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, implement the
// request.Retryer interface or create a structure type that composes this
// struct and override the specific methods. For example, to override only
// the MaxRetries method:
//
// type retryer struct {
// service.DefaultRetryer
// }
//
// // This implementation always has 100 max retries
// func (d retryer) MaxRetries() uint { return 100 }
type DefaultRetryer struct {
NumMaxRetries int
}
// MaxRetries returns the number of maximum returns the service will use to make
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
minTime := 30
throttle := d.shouldThrottle(r)
if throttle {
minTime = 500
}
retryCount := r.RetryCount
if retryCount > 13 {
retryCount = 13
} else if throttle && retryCount > 8 {
retryCount = 8
}
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
}
// ShouldRetry returns true if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
if r.HTTPResponse.StatusCode >= 500 {
return true
}
return r.IsErrorRetryable() || d.shouldThrottle(r)
}
// shouldThrottle returns true if the request should be throttled.
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
if r.HTTPResponse.StatusCode == 502 ||
r.HTTPResponse.StatusCode == 503 ||
r.HTTPResponse.StatusCode == 504 {
return true
}
return r.IsErrorThrottle()
}
// lockedSource is a thread-safe implementation of rand.Source
type lockedSource struct {
lk sync.Mutex
src rand.Source
}
func (r *lockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
func (r *lockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}
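
A short sketch of wiring a custom retryer through the Config, assuming the request.WithRetryer helper referenced in config.go below; client.New then picks it up via the cfg.Retryer type assertion shown earlier:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// capRetryer composes DefaultRetryer and only overrides MaxRetries, so
// RetryRules and ShouldRetry keep their default behavior.
type capRetryer struct {
	client.DefaultRetryer
}

// MaxRetries caps every request at 10 retries.
func (capRetryer) MaxRetries() int { return 10 }

func main() {
	// WithRetryer stores the retryer on the Config in a type-safe way.
	cfg := request.WithRetryer(aws.NewConfig(), capRetryer{})
	_ = session.New(cfg)
}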

12
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go

@ -0,0 +1,12 @@
package metadata
// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
ServiceName string
APIVersion string
Endpoint string
SigningName string
SigningRegion string
JSONVersion string
TargetPrefix string
}

358
vendor/github.com/aws/aws-sdk-go/aws/config.go

@ -0,0 +1,358 @@
package aws
import (
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
)
// UseServiceDefaultRetries instructs the config to use the service's own default
// number of retries. This is also the default behavior when Config.MaxRetries
// is nil.
const UseServiceDefaultRetries = -1
// RequestRetryer is an alias for a type that implements the request.Retryer interface.
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the {defaults.DefaultConfig} structure.
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when you want to see all errors while attempting to retrieve
// credentials.
CredentialsChainVerboseErrors *bool
// The credentials object to use when signing requests. Defaults to
// a chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// @note You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
// AWS Regions and Endpoints
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
// to `false`.
DisableSSL *bool
// The HTTP client to use when sending requests. Defaults to
// `http.DefaultClient`.
HTTPClient *http.Client
// An integer value representing the logging level. The default log level
// is zero (LogOff), which represents no logging. To enable logging set
// to a LogLevel Value.
LogLevel *LogLevelType
// The logger writer interface to write logging messages to. Defaults to
// standard out.
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service specific
// configuration.
MaxRetries *int
// Retryer guides how HTTP requests should be retried in case of recoverable failures.
//
// When nil or the value does not implement the request.Retryer interface,
// the request.DefaultRetryer will be used.
//
// When both Retryer and MaxRetries are non-nil, the former is used and
// the latter ignored.
//
// To set the Retryer field in a type-safe manner and with chaining, use
// the request.WithRetryer helper function:
//
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
//
Retryer RequestRetryer
// Disables semantic parameter validation, which validates input for missing
// required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
// CRC32 checksums in Amazon DynamoDB.
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
// use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
// header to PUT requests over 2MB of content. 100-Continue instructs the
// HTTP client not to send the body until the service responds with a
// `continue` status. This is useful to prevent sending the request body
// until after the request is authenticated, and validated.
//
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
//
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
// `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
// https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
// Set this to `true` to enable the S3 Accelerate feature. Operations compatible
// with S3 Accelerate will use the accelerate endpoint for requests; requests
// that are not compatible will fall back to normal S3 requests.
//
// The bucket must have accelerate enabled before it can be used with a client
// that has accelerate enabled. If the bucket is not enabled for accelerate an
// error will be returned. The bucket name must also be DNS compatible for
// accelerate to work.
S3UseAccelerate *bool
// Set this to `true` to prevent the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This option is only
// meaningful if you're not already using a custom HTTP client with the SDK.
// Enabled by default.
//
// Must be set and provided to session.New() in order to stop the EC2Metadata
// client from overriding the timeout for the default credentials chain.
//
// Example:
// sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
// svc := s3.New(sess)
//
EC2MetadataDisableTimeoutOverride *bool
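// SleepDelay is the function to call when the SDK pauses between request
// retries. Defaults to time.Sleep.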
SleepDelay func(time.Duration)
}
// NewConfig returns a new Config pointer that can be chained with builder methods to
// set multiple configuration values inline without using pointers.
//
// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
//
func NewConfig() *Config {
return &Config{}
}
// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
// value returning a Config pointer for chaining.
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
c.CredentialsChainVerboseErrors = &verboseErrs
return c
}
// WithCredentials sets a config Credentials value returning a Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
c.Credentials = creds
return c
}
// WithEndpoint sets a config Endpoint value returning a Config pointer for
// chaining.
func (c *Config) WithEndpoint(endpoint string) *Config {
c.Endpoint = &endpoint
return c
}
// WithRegion sets a config Region value returning a Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
c.Region = &region
return c
}
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
// for chaining.
func (c *Config) WithDisableSSL(disable bool) *Config {
c.DisableSSL = &disable
return c
}
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
// for chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
c.HTTPClient = client
return c
}
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
// for chaining.
func (c *Config) WithMaxRetries(max int) *Config {
c.MaxRetries = &max
return c
}
// WithDisableParamValidation sets a config DisableParamValidation value
// returning a Config pointer for chaining.
func (c *Config) WithDisableParamValidation(disable bool) *Config {
c.DisableParamValidation = &disable
return c
}
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
// returning a Config pointer for chaining.
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
c.DisableComputeChecksums = &disable
return c
}
// WithLogLevel sets a config LogLevel value returning a Config pointer for
// chaining.
func (c *Config) WithLogLevel(level LogLevelType) *Config {
c.LogLevel = &level
return c
}
// WithLogger sets a config Logger value returning a Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
c.Logger = logger
return c
}
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
// pointer for chaining.
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
c.S3ForcePathStyle = &force
return c
}
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
// a Config pointer for chaining.
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
c.S3Disable100Continue = &disable
return c
}
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
// pointer for chaining.
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
c.S3UseAccelerate = &enable
return c
}
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
c.EC2MetadataDisableTimeoutOverride = &enable
return c
}
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
c.SleepDelay = fn
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
mergeInConfig(c, other)
}
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
}
if other.CredentialsChainVerboseErrors != nil {
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
}
if other.Credentials != nil {
dst.Credentials = other.Credentials
}
if other.Endpoint != nil {
dst.Endpoint = other.Endpoint
}
if other.Region != nil {
dst.Region = other.Region
}
if other.DisableSSL != nil {
dst.DisableSSL = other.DisableSSL
}
if other.HTTPClient != nil {
dst.HTTPClient = other.HTTPClient
}
if other.LogLevel != nil {
dst.LogLevel = other.LogLevel
}
if other.Logger != nil {
dst.Logger = other.Logger
}
if other.MaxRetries != nil {
dst.MaxRetries = other.MaxRetries
}
if other.Retryer != nil {
dst.Retryer = other.Retryer
}
if other.DisableParamValidation != nil {
dst.DisableParamValidation = other.DisableParamValidation
}
if other.DisableComputeChecksums != nil {
dst.DisableComputeChecksums = other.DisableComputeChecksums
}
if other.S3ForcePathStyle != nil {
dst.S3ForcePathStyle = other.S3ForcePathStyle
}
if other.S3Disable100Continue != nil {
dst.S3Disable100Continue = other.S3Disable100Continue
}
if other.S3UseAccelerate != nil {
dst.S3UseAccelerate = other.S3UseAccelerate
}
if other.EC2MetadataDisableTimeoutOverride != nil {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
}
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
}
// Copy will return a shallow copy of the Config object. If any additional
// configurations are provided they will be merged into the new config returned.
func (c *Config) Copy(cfgs ...*Config) *Config {
dst := &Config{}
dst.MergeIn(c)
for _, cfg := range cfgs {
dst.MergeIn(cfg)
}
return dst
}
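
A small usage sketch of the builder-style Config above; Copy merges overrides into a fresh Config and leaves the original untouched:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(3)

	// Copy starts from base and merges the debug override on top.
	debug := base.Copy(aws.NewConfig().WithLogLevel(aws.LogDebug))

	fmt.Println(aws.StringValue(debug.Region))  // us-west-2
	fmt.Println(aws.IntValue(debug.MaxRetries)) // 3
}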

369
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go

@ -0,0 +1,369 @@
package aws
import "time"
// String returns a pointer to the string value passed in.
func String(v string) *string {
return &v
}
// StringValue returns the value of the string pointer passed in or
// "" if the pointer is nil.
func StringValue(v *string) string {
if v != nil {
return *v
}
return ""
}
// StringSlice converts a slice of string values into a slice of
// string pointers
func StringSlice(src []string) []*string {
dst := make([]*string, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// StringValueSlice converts a slice of string pointers into a slice of
// string values
func StringValueSlice(src []*string) []string {
dst := make([]string, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// StringMap converts a string map of string values into a string
// map of string pointers
func StringMap(src map[string]string) map[string]*string {
dst := make(map[string]*string)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// StringValueMap converts a string map of string pointers into a string
// map of string values
func StringValueMap(src map[string]*string) map[string]string {
dst := make(map[string]string)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
return &v
}
// BoolValue returns the value of the bool pointer passed in or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
if v != nil {
return *v
}
return false
}
// BoolSlice converts a slice of bool values into a slice of
// bool pointers
func BoolSlice(src []bool) []*bool {
dst := make([]*bool, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values
func BoolValueSlice(src []*bool) []bool {
dst := make([]bool, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// BoolMap converts a string map of bool values into a string
// map of bool pointers
func BoolMap(src map[string]bool) map[string]*bool {
dst := make(map[string]*bool)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// BoolValueMap converts a string map of bool pointers into a string
// map of bool values
func BoolValueMap(src map[string]*bool) map[string]bool {
dst := make(map[string]bool)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int returns a pointer to the int value passed in.
func Int(v int) *int {
return &v
}
// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
if v != nil {
return *v
}
return 0
}
// IntSlice converts a slice of int values into a slice of
// int pointers
func IntSlice(src []int) []*int {
dst := make([]*int, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// IntValueSlice converts a slice of int pointers into a slice of
// int values
func IntValueSlice(src []*int) []int {
dst := make([]int, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// IntMap converts a string map of int values into a string
// map of int pointers
func IntMap(src map[string]int) map[string]*int {
dst := make(map[string]*int)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// IntValueMap converts a string map of int pointers into a string
// map of int values
func IntValueMap(src map[string]*int) map[string]int {
dst := make(map[string]int)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
return &v
}
// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
if v != nil {
return *v
}
return 0
}
// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers
func Int64Slice(src []int64) []*int64 {
dst := make([]*int64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values
func Int64ValueSlice(src []*int64) []int64 {
dst := make([]int64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int64Map converts a string map of int64 values into a string
// map of int64 pointers
func Int64Map(src map[string]int64) map[string]*int64 {
dst := make(map[string]*int64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values
func Int64ValueMap(src map[string]*int64) map[string]int64 {
dst := make(map[string]int64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
return &v
}
// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
if v != nil {
return *v
}
return 0
}
// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers
func Float64Slice(src []float64) []*float64 {
dst := make([]*float64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values
func Float64ValueSlice(src []*float64) []float64 {
dst := make([]float64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float64Map converts a string map of float64 values into a string
// map of float64 pointers
func Float64Map(src map[string]float64) map[string]*float64 {
dst := make(map[string]*float64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values
func Float64ValueMap(src map[string]*float64) map[string]float64 {
dst := make(map[string]float64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time {
return &v
}
// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
if v != nil {
return *v
}
return time.Time{}
}
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
// The result is undefined if the Unix time cannot be represented by an int64,
// which includes calling TimeUnixMilli on a zero Time.
//
// This utility is useful for service APIs such as CloudWatch Logs which require
// their unix time values to be in milliseconds.
//
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
func TimeUnixMilli(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}
// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {
dst := make([]*time.Time, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values
func TimeValueSlice(src []*time.Time) []time.Time {
dst := make([]time.Time, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// TimeMap converts a string map of time.Time values into a string
// map of time.Time pointers
func TimeMap(src map[string]time.Time) map[string]*time.Time {
dst := make(map[string]*time.Time)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// TimeValueMap converts a string map of time.Time pointers into a string
// map of time.Time values
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
dst := make(map[string]time.Time)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
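
A quick sketch of the pointer helpers above, which exist because most SDK API structs take pointer fields:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Scalar helpers: value -> pointer and back, nil-safe.
	name := aws.String("bucket-name")
	fmt.Println(aws.StringValue(name)) // "bucket-name"
	fmt.Println(aws.StringValue(nil))  // ""

	// Slice helpers convert whole collections in one call.
	ids := aws.Int64Slice([]int64{1, 2, 3})
	fmt.Println(aws.Int64ValueSlice(ids)) // [1 2 3]
}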

152
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go

@ -0,0 +1,152 @@
package corehandlers
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// lener is an interface for matching types which also have a Len method.
type lener interface {
Len() int
}
// BuildContentLengthHandler builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine request body length and no "Content-Length" was specified it will panic.
//
// The Content-Length will only be added to the request if the length of the body
// is greater than 0. If the body is empty or the current `Content-Length`
// header is <= 0, the header will also be stripped.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
var length int64
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ = strconv.ParseInt(slength, 10, 64)
} else {
switch body := r.Body.(type) {
case nil:
length = 0
case lener:
length = int64(body.Len())
case io.Seeker:
r.BodyStart, _ = body.Seek(0, 1)
end, _ := body.Seek(0, 2)
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
length = end - r.BodyStart
default:
panic("Cannot get length of body, must provide `ContentLength`")
}
}
if length > 0 {
r.HTTPRequest.ContentLength = length
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
} else {
r.HTTPRequest.ContentLength = 0
r.HTTPRequest.Header.Del("Content-Length")
}
}}
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
Name: "core.SDKVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
runtime.Version(), runtime.GOOS, runtime.GOARCH),
}
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// SendHandler is a request handler to send service request using HTTP client.
var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
var err error
r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
if err != nil {
// Prevent leaking if an HTTPResponse was returned. Clean up
// the body.
if r.HTTPResponse != nil {
r.HTTPResponse.Body.Close()
}
// Capture the case where url.Error is returned while processing the
// response, e.g. a 301 without a Location header comes back as a string
// error and r.HTTPResponse is nil. Other url redirect errors will
// come back in a similar manner.
if e, ok := err.(*url.Error); ok && e.Err != nil {
if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
code, _ := strconv.ParseInt(s[1], 10, 64)
r.HTTPResponse = &http.Response{
StatusCode: int(code),
Status: http.StatusText(int(code)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
return
}
}
if r.HTTPResponse == nil {
// Add a dummy request response object to ensure the HTTPResponse
// value is consistent.
r.HTTPResponse = &http.Response{
StatusCode: int(0),
Status: http.StatusText(int(0)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
}
// Catch all other request errors.
r.Error = awserr.New("RequestError", "send request failed", err)
r.Retryable = aws.Bool(true) // network errors are retryable
}
}}
// ValidateResponseHandler is a request handler to validate service response.
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
// this may be replaced by an UnmarshalError handler
r.Error = awserr.New("UnknownError", "unknown error", nil)
}
}}
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
r.Config.SleepDelay(r.RetryDelay)
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
r.RetryCount++
r.Error = nil
}
}}
// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
// region is not valid.
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}}
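
A sketch of how these core handlers coexist with user handlers; the hypothetical logging handler below is pushed onto the same Send list that core.SendHandler lives on (the session package is from the vendored SDK, not this hunk):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.New()

	// Runs after core.SendHandler for every request built from this session.
	sess.Handlers.Send.PushBackNamed(request.NamedHandler{
		Name: "example.LogOperation",
		Fn: func(r *request.Request) {
			fmt.Println("sent", r.ClientInfo.ServiceName, r.Operation.Name)
		},
	})
}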

17
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go

@ -0,0 +1,17 @@
package corehandlers
import "github.com/aws/aws-sdk-go/aws/request"
// ValidateParametersHandler is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
if !r.ParamsFilled() {
return
}
if v, ok := r.Params.(request.Validator); ok {
if err := v.Validate(); err != nil {
r.Error = err
}
}
}}

100
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go

@ -0,0 +1,100 @@
package credentials
import (
"github.com/aws/aws-sdk-go/aws/awserr"
)
var (
// ErrNoValidProvidersFoundInChain is returned when there are no valid
// providers in the ChainProvider.
//
// This has been deprecated. For verbose error messaging set
// aws.Config.CredentialsChainVerboseErrors to true
//
// @readonly
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
`no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
nil)
)
// A ChainProvider will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The ChainProvider provides a way of chaining multiple providers together
// which will pick the first available using priority order of the Providers
// in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again.
//
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
// In this example EnvProvider will first check if any credentials are available
// via the environment variables. If there are none ChainProvider will check
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
// does not return any credentials ChainProvider will return the error
// ErrNoValidProvidersFoundInChain
//
// creds := NewChainCredentials(
// []Provider{
// &EnvProvider{},
// &EC2RoleProvider{
// Client: ec2metadata.New(sess),
// },
// })
//
// // Usage of ChainCredentials with aws.Config
// svc := ec2.New(&aws.Config{Credentials: creds})
//
type ChainProvider struct {
Providers []Provider
curr Provider
VerboseErrors bool
}
// NewChainCredentials returns a pointer to a new Credentials object
// wrapping a chain of providers.
func NewChainCredentials(providers []Provider) *Credentials {
return NewCredentials(&ChainProvider{
Providers: append([]Provider{}, providers...),
})
}
// Retrieve returns the credentials value, or an error if no provider returned
// credentials without error.
//
// If a provider is found it will be cached and any calls to IsExpired()
// will return the expired state of the cached provider.
func (c *ChainProvider) Retrieve() (Value, error) {
var errs []error
for _, p := range c.Providers {
creds, err := p.Retrieve()
if err == nil {
c.curr = p
return creds, nil
}
errs = append(errs, err)
}
c.curr = nil
var err error
err = ErrNoValidProvidersFoundInChain
if c.VerboseErrors {
err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
}
return Value{}, err
}
// IsExpired will return the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
func (c *ChainProvider) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
}
return true
}
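
A runnable variant of the ChainProvider example from the comment above, using only providers from this package; the EC2 role provider is left out to keep it self-contained:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{},
	})

	v, err := creds.Get()
	if err != nil {
		// ErrNoValidProvidersFoundInChain (or a batch error when verbose
		// errors are enabled) ends up here.
		fmt.Println("no credentials:", err)
		return
	}
	fmt.Println("resolved by", v.ProviderName)
}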

223
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go

@ -0,0 +1,223 @@
// Package credentials provides credential retrieval and management
//
// The Credentials type is the primary method of getting access to and managing
// credential Values. Using dependency injection, retrieval of the credential
// values is handled by an object which satisfies the Provider interface.
//
// By default the Credentials.Get() will cache the successful result of a
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
// point Credentials will call Provider's Retrieve() to get new credential Value.
//
// The Provider is responsible for determining when credentials Value have expired.
// It is also important to note that Credentials will always call Retrieve the
// first time Credentials.Get() is called.
//
// Example of using the environment variable credentials.
//
// creds := NewEnvCredentials()
//
// // Retrieve the credentials value
// credValue, err := creds.Get()
// if err != nil {
// // handle error
// }
//
// Example of forcing credentials to expire and be refreshed on the next Get().
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
// creds := NewCredentials(&EC2RoleProvider{})
// creds.Expire()
// credsValue, err := creds.Get()
// // New credentials will be retrieved instead of from cache.
//
//
// Custom Provider
//
// Each Provider built into this package also provides a helper method to generate
// a Credentials pointer set up with the provider. To use a custom Provider just
// create a type which satisfies the Provider interface and pass it to the
// NewCredentials method.
//
// type MyProvider struct{}
// func (m *MyProvider) Retrieve() (Value, error) {...}
// func (m *MyProvider) IsExpired() bool {...}
//
// creds := NewCredentials(&MyProvider{})
// credValue, err := creds.Get()
//
package credentials
import (
"sync"
"time"
)
// AnonymousCredentials is an empty Credential object that can be used as
// dummy placeholder credentials for requests that do not need to be signed.
//
// This Credentials can be used to configure a service to not sign requests
// when making service API calls. For example, when accessing public
// s3 buckets.
//
// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
// // Access public S3 buckets.
//
// @readonly
var AnonymousCredentials = NewStaticCredentials("", "", "")
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
// Provider used to get credentials
ProviderName string
}
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what
// being expired means.
//
// The Provider should not need to implement its own mutexes, because
// that will be managed by Credentials.
type Provider interface {
// Retrieve returns a nil error if it successfully retrieved the value.
// An error is returned if the value was not obtainable, or was empty.
Retrieve() (Value, error)
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
IsExpired() bool
}
// An Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
// type EC2RoleProvider struct {
// Expiry
// ...
// }
type Expiry struct {
// The date/time when to expire on
expiration time.Time
// If set will be used by IsExpired to determine the current time.
// Defaults to time.Now if CurrentTime is not set. Available for testing
// to be able to mock out the current time.
CurrentTime func() time.Time
}
// SetExpiration sets the expiration IsExpired will check when called.
//
// If window is greater than 0 the expiration time will be reduced by the
// window value.
//
// Using a window is helpful to trigger credentials to expire sooner than
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
}
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
if e.CurrentTime == nil {
e.CurrentTime = time.Now
}
return e.expiration.Before(e.CurrentTime())
}
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
creds Value
forceRefresh bool
m sync.Mutex
provider Provider
}
// NewCredentials returns a pointer to a new Credentials with the provider set.
func NewCredentials(provider Provider) *Credentials {
return &Credentials{
provider: provider,
forceRefresh: true,
}
}
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
c.m.Lock()
defer c.m.Unlock()
if c.isExpired() {
creds, err := c.provider.Retrieve()
if err != nil {
return Value{}, err
}
c.creds = creds
c.forceRefresh = false
}
return c.creds, nil
}
// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
c.m.Lock()
defer c.m.Unlock()
c.forceRefresh = true
}
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
c.m.Lock()
defer c.m.Unlock()
return c.isExpired()
}
// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
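
A minimal sketch of a custom Provider that embeds Expiry, as the comment on Expiry suggests; the token values and lifetime below are placeholders for illustration:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// tokenProvider is a hypothetical provider; embedding Expiry supplies
// IsExpired so only Retrieve has to be written.
type tokenProvider struct {
	credentials.Expiry
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// Placeholder credentials; expire them 5 minutes early via the window.
	p.SetExpiration(time.Now().Add(time.Hour), 5*time.Minute)
	return credentials.Value{
		AccessKeyID:     "AKID",
		SecretAccessKey: "SECRET",
		ProviderName:    "tokenProvider",
	}, nil
}

func main() {
	creds := credentials.NewCredentials(&tokenProvider{})
	_, _ = creds.Get() // cached until the expiry window is hit
}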

178
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go

@ -0,0 +1,178 @@
package ec2rolecreds
import (
"bufio"
"encoding/json"
"fmt"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
)
// ProviderName provides a name of EC2Role provider
const ProviderName = "EC2RoleProvider"
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track
// of whether those credentials are expired.
//
// Example of how to configure the EC2RoleProvider with a custom http Client,
// Endpoint, or ExpiryWindow:
//
// p := &ec2rolecreds.EC2RoleProvider{
// // Pass in a custom timeout to be used when requesting
// // IAM EC2 Role credentials.
// Client: ec2metadata.New(sess, aws.Config{
// HTTPClient: &http.Client{Timeout: 10 * time.Second},
// }),
//
// // Do not use early expiry of credentials. If a non zero value is
// // specified the credentials will be expired early
// ExpiryWindow: 0,
// }
type EC2RoleProvider struct {
credentials.Expiry
// Required EC2Metadata client to use when connecting to EC2 metadata service.
Client *ec2metadata.EC2Metadata
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: ec2metadata.New(c),
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the EC2
// metadata service.
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: client,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve retrieves credentials from the EC2 service.
// An error will be returned if the request fails, or if it is unable to extract
// the desired credentials.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
credsList, err := requestCredList(m.Client)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
if len(credsList) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
}
credsName := credsList[0]
roleCreds, err := requestCred(m.Client, credsName)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
return credentials.Value{
AccessKeyID: roleCreds.AccessKeyID,
SecretAccessKey: roleCreds.SecretAccessKey,
SessionToken: roleCreds.Token,
ProviderName: ProviderName,
}, nil
}
// An ec2RoleCredRespBody provides the shape for unmarshalling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
Expiration time.Time
AccessKeyID string
SecretAccessKey string
Token string
// Error state
Code string
Message string
}
const iamSecurityCredsPath = "/iam/security-credentials"
// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request,
// an error will be returned.
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
resp, err := client.GetMetadata(iamSecurityCredsPath)
if err != nil {
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
}
credsList := []string{}
s := bufio.NewScanner(strings.NewReader(resp))
for s.Scan() {
credsList = append(credsList, s.Text())
}
if err := s.Err(); err != nil {
return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
}
return credsList, nil
}
// requestCred requests the credentials for a specific credentials name from the EC2 service.
//
// If the credentials cannot be found, or there is an error reading the response,
// an error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
awserr.New("EC2RoleRequestError",
fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
err)
}
respCreds := ec2RoleCredRespBody{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{},
awserr.New("SerializationError",
fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
err)
}
if respCreds.Code != "Success" {
// If an error code was returned something failed requesting the role.
return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
}
return respCreds, nil
}
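
A sketch of building the provider with an explicit metadata client, roughly mirroring the doc comment above; the short HTTP timeout and the 5 minute window are illustrative choices, not requirements:

package main

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.New()

	// A short timeout makes non-EC2 hosts fail fast instead of hanging.
	meta := ec2metadata.New(sess, aws.NewConfig().
		WithHTTPClient(&http.Client{Timeout: 5 * time.Second}))

	creds := ec2rolecreds.NewCredentialsWithClient(meta,
		func(p *ec2rolecreds.EC2RoleProvider) {
			p.ExpiryWindow = 5 * time.Minute
		})
	_ = creds
}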

77
vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go

@ -0,0 +1,77 @@
package credentials
import (
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// EnvProviderName provides a name of Env provider
const EnvProviderName = "EnvProvider"
var (
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
// found in the process's environment.
//
// @readonly
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
// can't be found in the process's environment.
//
// @readonly
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
// An EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
retrieved bool
}
// NewEnvCredentials returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvCredentials() *Credentials {
return NewCredentials(&EnvProvider{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvProvider) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("AWS_ACCESS_KEY_ID")
if id == "" {
id = os.Getenv("AWS_ACCESS_KEY")
}
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if secret == "" {
secret = os.Getenv("AWS_SECRET_KEY")
}
if id == "" {
return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
}
if secret == "" {
return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
ProviderName: EnvProviderName,
}, nil
}
// IsExpired returns true if the credentials have not yet been retrieved.
func (e *EnvProvider) IsExpired() bool {
return !e.retrieved
}
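
A tiny sketch of the environment provider; the keys are set in-process here purely so the example is self-contained and are placeholders only:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder values for illustration only.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

	creds := credentials.NewEnvCredentials()
	v, err := creds.Get()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(v.ProviderName) // EnvProvider
}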

12
vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini

@ -0,0 +1,12 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token
[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret
[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret

151
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go

@ -0,0 +1,151 @@
package credentials
import (
"fmt"
"os"
"path/filepath"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// SharedCredsProviderName provides a name of SharedCreds provider
const SharedCredsProviderName = "SharedCredentialsProvider"
var (
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
//
// @readonly
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)
// A SharedCredentialsProvider retrieves credentials from the current user's home
// directory, and keeps track of whether those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
// Path to the shared credentials file.
//
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.aws/credentials"
// Windows: "%USERPROFILE%\.aws\credentials"
Filename string
// AWS Profile to extract credentials from the shared credentials file. If empty
// will default to environment variable "AWS_PROFILE" or "default" if
// environment variable is also not set.
Profile string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewSharedCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewSharedCredentials(filename, profile string) *Credentials {
return NewCredentials(&SharedCredentialsProvider{
Filename: filename,
Profile: profile,
})
}
// Retrieve reads and extracts the shared credentials from the current
// user's home directory.
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
p.retrieved = false
filename, err := p.filename()
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, err
}
creds, err := loadProfile(filename, p.profile())
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, err
}
p.retrieved = true
return creds, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *SharedCredentialsProvider) IsExpired() bool {
return !p.retrieved
}
// loadProfile loads credentials from the file pointed to by the shared credentials
// filename for the given profile. The credentials retrieved from the profile will be
// returned, or an error if it fails to read from the file or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
config, err := ini.Load(filename)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
}
iniProfile, err := config.GetSection(profile)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
}
id, err := iniProfile.GetKey("aws_access_key_id")
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
err)
}
secret, err := iniProfile.GetKey("aws_secret_access_key")
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
err)
}
// Default to empty string if not found
token := iniProfile.Key("aws_session_token")
return Value{
AccessKeyID: id.String(),
SecretAccessKey: secret.String(),
SessionToken: token.String(),
ProviderName: SharedCredsProviderName,
}, nil
}
// filename returns the filename to use to read AWS shared credentials.
//
// Will return an error if the user's home directory path cannot be found.
func (p *SharedCredentialsProvider) filename() (string, error) {
if p.Filename == "" {
if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
return p.Filename, nil
}
homeDir := os.Getenv("HOME") // *nix
if homeDir == "" { // Windows
homeDir = os.Getenv("USERPROFILE")
}
if homeDir == "" {
return "", ErrSharedCredentialsHomeNotFound
}
p.Filename = filepath.Join(homeDir, ".aws", "credentials")
}
return p.Filename, nil
}
// profile returns the AWS shared credentials profile. If empty will read
// environment variable "AWS_PROFILE". If that is not set profile will
// return "default".
func (p *SharedCredentialsProvider) profile() string {
if p.Profile == "" {
p.Profile = os.Getenv("AWS_PROFILE")
}
if p.Profile == "" {
p.Profile = "default"
}
return p.Profile
}
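
A short sketch of loading one of the profiles from an ini file shaped like the example.ini above; an empty filename falls back to AWS_SHARED_CREDENTIALS_FILE and then the home directory default:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// "no_token" matches the profile name used in example.ini above.
	creds := credentials.NewSharedCredentials("", "no_token")

	if _, err := creds.Get(); err != nil {
		fmt.Println("could not load shared credentials:", err)
	}
}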

48
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go

@ -0,0 +1,48 @@
package credentials
import (
"github.com/aws/aws-sdk-go/aws/awserr"
)
// StaticProviderName provides a name of Static provider
const StaticProviderName = "StaticProvider"
var (
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
//
// @readonly
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)
// A StaticProvider is a set of credentials which are set programmatically,
// and will never expire.
type StaticProvider struct {
Value
}
// NewStaticCredentials returns a pointer to a new Credentials object
// wrapping a static credentials value provider.
func NewStaticCredentials(id, secret, token string) *Credentials {
return NewCredentials(&StaticProvider{Value: Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
}})
}
// Retrieve returns the credentials or error if the credentials are invalid.
func (s *StaticProvider) Retrieve() (Value, error) {
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
}
s.Value.ProviderName = StaticProviderName
return s.Value, nil
}
// IsExpired returns if the credentials are expired.
//
// For StaticProvider, the credentials never expire.
func (s *StaticProvider) IsExpired() bool {
return false
}
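
A sketch of plugging static credentials into a config; the key values are placeholders and an empty token is fine for non-STS credentials:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Placeholder keys for illustration only.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	_ = session.New(aws.NewConfig().WithCredentials(creds))
}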

161
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go

@ -0,0 +1,161 @@
// Package stscreds provides credential Providers for retrieving STS AWS credentials.
//
// STS provides multiple ways to retrieve credentials which can be used when making
// future AWS service API operation calls.
package stscreds
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
)
// ProviderName provides a name of AssumeRole provider
const ProviderName = "AssumeRoleProvider"
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
// DefaultDuration is the default amount of time (15 minutes) that the
// credentials will be valid for.
var DefaultDuration = time.Duration(15) * time.Minute
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
type AssumeRoleProvider struct {
credentials.Expiry
// STS client to make assume role request with.
Client AssumeRoler
// Role to be assumed.
RoleARN string
// Session name, if you wish to reuse the credentials elsewhere.
RoleSessionName string
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// Optional ExternalID to pass along, defaults to nil if not set.
ExternalID *string
// The policy plain text must be 2048 bytes or shorter. However, an internal
// conversion compresses it into a packed binary format with a separate limit.
// The PackedPolicySize response element indicates by percentage how close to
// the upper size limit the policy is, with 100% equaling the maximum allowed
// size.
Policy *string
// The identification number of the MFA device that is associated with the user
// who is making the AssumeRole call. Specify this value if the trust policy
// of the role being assumed includes a condition that requires MFA authentication.
// The value is either the serial number for a hardware device (such as GAHT12345678)
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
SerialNumber *string
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA (that is, if the policy includes a condition that tests
// for MFA). If the role being assumed requires MFA and if the TokenCode value
// is missing or expired, the AssumeRole call returns an "access denied" error.
TokenCode *string
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: sts.New(c),
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: svc,
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
}
if p.Duration == 0 {
// Expire as often as AWS permits.
p.Duration = DefaultDuration
}
input := &sts.AssumeRoleInput{
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
RoleArn: aws.String(p.RoleARN),
RoleSessionName: aws.String(p.RoleSessionName),
ExternalId: p.ExternalID,
}
if p.Policy != nil {
input.Policy = p.Policy
}
if p.SerialNumber != nil && p.TokenCode != nil {
input.SerialNumber = p.SerialNumber
input.TokenCode = p.TokenCode
}
roleOutput, err := p.Client.AssumeRole(input)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
// We will proactively generate new credentials before they expire.
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
return credentials.Value{
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
SessionToken: *roleOutput.Credentials.SessionToken,
ProviderName: ProviderName,
}, nil
}
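
A sketch of assuming a role explicitly; the role ARN is a placeholder and the functional option raises the lifetime from the 15 minute DefaultDuration (the sts service client used underneath comes from the wider SDK, not this hunk):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.New()

	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo",
		func(p *stscreds.AssumeRoleProvider) {
			p.Duration = 30 * time.Minute
		})

	// Hand the credentials to any service client via its Config.
	_ = aws.NewConfig().WithCredentials(creds)
}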

98
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go

@ -0,0 +1,98 @@
// Package defaults is a collection of helpers to retrieve the SDK's default
// configuration and handlers.
//
// Generally this package shouldn't be used directly; use session.Session
// instead. This package is useful when you need to reset the defaults
// of a session or service client to the SDK defaults before setting
// additional parameters.
package defaults
import (
"net/http"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/endpoints"
)
// A Defaults provides a collection of default values for SDK clients.
type Defaults struct {
Config *aws.Config
Handlers request.Handlers
}
// Get returns the SDK's default values with Config and handlers pre-configured.
func Get() Defaults {
cfg := Config()
handlers := Handlers()
cfg.Credentials = CredChain(cfg, handlers)
return Defaults{
Config: cfg,
Handlers: handlers,
}
}
// Config returns the default configuration without credentials.
// To retrieve a config with credentials also included use
// `defaults.Get().Config` instead.
//
// Generally you shouldn't need to use this method directly, but it
// is available if you need to reset the configuration of an
// existing service client or session.
func Config() *aws.Config {
return aws.NewConfig().
WithCredentials(credentials.AnonymousCredentials).
WithRegion(os.Getenv("AWS_REGION")).
WithHTTPClient(http.DefaultClient).
WithMaxRetries(aws.UseServiceDefaultRetries).
WithLogger(aws.NewDefaultLogger()).
WithLogLevel(aws.LogOff).
WithSleepDelay(time.Sleep)
}
// Handlers returns the default request handlers.
//
// Generally you shouldn't need to use this method directly, but it
// is available if you need to reset the request handlers of an
// existing service client or session.
func Handlers() request.Handlers {
var handlers request.Handlers
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
handlers.Send.PushBackNamed(corehandlers.SendHandler)
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
return handlers
}
// CredChain returns the default credential chain.
//
// Generally you shouldn't need to use this method directly, but it
// is available if you need to reset the credentials of an
// existing service client or session's Config.
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
return credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
ExpiryWindow: 5 * time.Minute,
},
}})
}
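A short sketch of how these helpers compose when resetting a config to SDK defaults before overriding individual settings; the region value is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Get returns the default Config and Handlers with the credential chain wired in.
	def := defaults.Get()

	// Reset to SDK defaults, then override just the region.
	cfg := def.Config.WithRegion("us-west-2")

	// The credential chain can also be rebuilt explicitly from a config and handlers.
	cfg.Credentials = defaults.CredChain(cfg, def.Handlers)

	fmt.Println("region:", aws.StringValue(cfg.Region))
}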

140
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go

@ -0,0 +1,140 @@
package ec2metadata
import (
"encoding/json"
"fmt"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// GetMetadata uses the path provided to request information from the EC2
// instance metadata service. The content will be returned as a string, or an
// error if the request failed.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "meta-data", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
return output.Content, req.Send()
}
// GetDynamicData uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or an error if the request failed.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "dynamic", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
return output.Content, req.Send()
}
// GetInstanceIdentityDocument retrieves an identity document describing an
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
resp, err := c.GetDynamicData("instance-identity/document")
if err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("EC2MetadataRequestError",
"failed to get EC2 instance identity document", err)
}
doc := EC2InstanceIdentityDocument{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("SerializationError",
"failed to decode EC2 instance identity document", err)
}
return doc, nil
}
// IAMInfo retrieves IAM info from the metadata API
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
resp, err := c.GetMetadata("iam/info")
if err != nil {
return EC2IAMInfo{},
awserr.New("EC2MetadataRequestError",
"failed to get EC2 IAM info", err)
}
info := EC2IAMInfo{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
return EC2IAMInfo{},
awserr.New("SerializationError",
"failed to decode EC2 IAM info", err)
}
if info.Code != "Success" {
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
return EC2IAMInfo{},
awserr.New("EC2MetadataError", errMsg, nil)
}
return info, nil
}
// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
resp, err := c.GetMetadata("placement/availability-zone")
if err != nil {
return "", err
}
// returns region without the suffix. Eg: us-west-2a becomes us-west-2
return resp[:len(resp)-1], nil
}
// Available returns if the application has access to the EC2 Metadata service.
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
func (c *EC2Metadata) Available() bool {
if _, err := c.GetMetadata("instance-id"); err != nil {
return false
}
return true
}
// An EC2IAMInfo provides the shape for unmarshalling
// an IAM info from the metadata API
type EC2IAMInfo struct {
Code string
LastUpdated time.Time
InstanceProfileArn string
InstanceProfileID string
}
// An EC2InstanceIdentityDocument provides the shape for unmarshalling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
}
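A minimal sketch of querying the metadata API with this client; it assumes the process is running on an EC2 instance (Available returns false otherwise):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.New())

	// Available probes the service with a cheap instance-id lookup.
	if !svc.Available() {
		fmt.Println("not running on EC2, or metadata service unreachable")
		return
	}

	// Region strips the availability-zone suffix, e.g. us-west-2a -> us-west-2.
	region, err := svc.Region()
	if err != nil {
		fmt.Println("region lookup failed:", err)
		return
	}

	doc, err := svc.GetInstanceIdentityDocument()
	if err != nil {
		fmt.Println("identity document lookup failed:", err)
		return
	}
	fmt.Println(region, doc.InstanceID, doc.InstanceType)
}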

124
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go

@ -0,0 +1,124 @@
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
package ec2metadata
import (
"bytes"
"errors"
"io"
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
)
// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
// An EC2Metadata is an EC2 Metadata service client.
type EC2Metadata struct {
*client.Client
}
// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
//
// Example:
// // Create an EC2Metadata client from just a session.
// svc := ec2metadata.New(mySession)
//
// // Create an EC2Metadata client with additional configuration
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
c := p.ClientConfig(ServiceName, cfgs...)
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// NewClient returns a new EC2Metadata client. Should be used to create
// a client when not using a session. Generally using just New with a session
// is preferred.
//
// If an unmodified HTTP client from the stdlib default, or no client at all, is
// provided, the EC2Metadata client's HTTP timeout will be shortened.
// To disable this override, set Config.EC2MetadataDisableTimeoutOverride to true. The override is enabled by default.
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
// If the http client is unmodified and this feature is not disabled
// set custom timeouts for EC2Metadata requests.
cfg.HTTPClient = &http.Client{
// use a shorter timeout than default because the metadata
// service is local if it is running, and to fail faster
// if not running on an ec2 instance.
Timeout: 5 * time.Second,
}
}
svc := &EC2Metadata{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
Endpoint: endpoint,
APIVersion: "latest",
},
handlers,
),
}
svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
svc.Handlers.Validate.Clear()
svc.Handlers.Validate.PushBack(validateEndpointHandler)
// Add additional options to the service config
for _, option := range opts {
option(svc.Client)
}
return svc
}
func httpClientZero(c *http.Client) bool {
return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
}
type metadataOutput struct {
Content string
}
func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
return
}
if data, ok := r.Data.(*metadataOutput); ok {
data.Content = b.String()
}
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
return
}
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
}
func validateEndpointHandler(r *request.Request) {
if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}
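As a sketch of the timeout override described for NewClient: supplying a config with EC2MetadataDisableTimeoutOverride set to true keeps a custom HTTP client timeout instead of the shortened 5 second default.

package main

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Keep a custom 30s timeout instead of letting NewClient shorten it to 5s.
	cfg := aws.NewConfig().WithHTTPClient(&http.Client{Timeout: 30 * time.Second})
	cfg.EC2MetadataDisableTimeoutOverride = aws.Bool(true)

	svc := ec2metadata.New(session.New(), cfg)
	_ = svc
}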

17
vendor/github.com/aws/aws-sdk-go/aws/errors.go

@ -0,0 +1,17 @@
package aws
import "github.com/aws/aws-sdk-go/aws/awserr"
var (
// ErrMissingRegion is an error that is returned if region configuration is
// not found.
//
// @readonly
ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
// resolved for a service.
//
// @readonly
ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)

112
vendor/github.com/aws/aws-sdk-go/aws/logger.go

@ -0,0 +1,112 @@
package aws
import (
"log"
"os"
)
// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevelType uint
// LogLevel returns a pointer to the given LogLevelType. Should be used to work
// around not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
return &l
}
// Value returns the LogLevel value or the default value LogOff if the LogLevel
// is nil. Safe to use on nil value LogLevelTypes.
func (l *LogLevelType) Value() LogLevelType {
if l != nil {
return *l
}
return LogOff
}
// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
c := l.Value()
return c&v == v
}
// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
c := l.Value()
return c >= v
}
const (
// LogOff states that no logging should be performed by the SDK. This is the
// default state of the SDK, and should be used to disable all logging.
LogOff LogLevelType = iota * 0x1000
// LogDebug states that debug output should be logged by the SDK. This should
// be used to inspect requests made and responses received.
LogDebug
)
// Debug Logging Sub Levels
const (
// LogDebugWithSigning states that the SDK should log request signing and
// presigning events. This should be used to log the signing details of
// requests for debugging. Will also enable LogDebug.
LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
// LogDebugWithHTTPBody states the SDK should log HTTP request and response
// HTTP bodies in addition to the headers and path. This should be used to
// see the body content of requests and responses made while using the SDK.
// Will also enable LogDebug.
LogDebugWithHTTPBody
// LogDebugWithRequestRetries states the SDK should log when service requests will
// be retried. This should be used when you want to log when service
// requests are being retried. Will also enable LogDebug.
LogDebugWithRequestRetries
// LogDebugWithRequestErrors states the SDK should log when service requests fail
// to build, send, validate, or unmarshal.
LogDebugWithRequestErrors
)
// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
Log(...interface{})
}
// A LoggerFunc is a convenience type to convert a function taking a variadic
// list of arguments and wrap it so the Logger interface can be used.
//
// Example:
// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
// fmt.Fprintln(os.Stdout, args...)
// })})
type LoggerFunc func(...interface{})
// Log calls the wrapped function with the arguments provided
func (f LoggerFunc) Log(args ...interface{}) {
f(args...)
}
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
// use the same formatting as the stdlib log.Logger.
func NewDefaultLogger() Logger {
return &defaultLogger{
logger: log.New(os.Stdout, "", log.LstdFlags),
}
}
// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
type defaultLogger struct {
logger *log.Logger
}
// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
l.logger.Println(args...)
}
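A small sketch of plugging a LoggerFunc and a debug sub level into a config; the bitwise Matches check above is what the SDK uses to decide whether to emit a statement:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Route SDK log output through a plain function via LoggerFunc.
	logger := aws.LoggerFunc(func(args ...interface{}) {
		fmt.Fprintln(os.Stderr, args...)
	})

	// Enable debug logging of request/response bodies; sub levels imply LogDebug.
	cfg := aws.NewConfig().
		WithLogger(logger).
		WithLogLevel(aws.LogDebugWithHTTPBody)

	// Matches reports whether a given sub level is enabled.
	fmt.Println(cfg.LogLevel.Matches(aws.LogDebug)) // true
}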

187
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go

@ -0,0 +1,187 @@
package request
import (
"fmt"
"strings"
)
// A Handlers provides a collection of request handlers for various
// stages of handling requests.
type Handlers struct {
Validate HandlerList
Build HandlerList
Sign HandlerList
Send HandlerList
ValidateResponse HandlerList
Unmarshal HandlerList
UnmarshalMeta HandlerList
UnmarshalError HandlerList
Retry HandlerList
AfterRetry HandlerList
}
// Copy returns a copy of this handler's lists.
func (h *Handlers) Copy() Handlers {
return Handlers{
Validate: h.Validate.copy(),
Build: h.Build.copy(),
Sign: h.Sign.copy(),
Send: h.Send.copy(),
ValidateResponse: h.ValidateResponse.copy(),
Unmarshal: h.Unmarshal.copy(),
UnmarshalError: h.UnmarshalError.copy(),
UnmarshalMeta: h.UnmarshalMeta.copy(),
Retry: h.Retry.copy(),
AfterRetry: h.AfterRetry.copy(),
}
}
// Clear removes callback functions for all handlers
func (h *Handlers) Clear() {
h.Validate.Clear()
h.Build.Clear()
h.Send.Clear()
h.Sign.Clear()
h.Unmarshal.Clear()
h.UnmarshalMeta.Clear()
h.UnmarshalError.Clear()
h.ValidateResponse.Clear()
h.Retry.Clear()
h.AfterRetry.Clear()
}
// A HandlerListRunItem represents an entry in the HandlerList which
// is being run.
type HandlerListRunItem struct {
Index int
Handler NamedHandler
Request *Request
}
// A HandlerList manages zero or more handlers in a list.
type HandlerList struct {
list []NamedHandler
// Called after each request handler in the list is called. If set
// and the func returns true the HandlerList will continue to iterate
// over the request handlers. If false is returned the HandlerList
// will stop iterating.
//
// Should be used if extra logic needs to be performed between each handler
// in the list. This can be used to terminate a list's iteration
// based on a condition, such as stopping on an error with HandlerListStopOnError,
// or for logging with HandlerListLogItem.
AfterEachFn func(item HandlerListRunItem) bool
}
// A NamedHandler is a struct that contains a name and function callback.
type NamedHandler struct {
Name string
Fn func(*Request)
}
// copy creates a copy of the handler list.
func (l *HandlerList) copy() HandlerList {
n := HandlerList{
AfterEachFn: l.AfterEachFn,
}
n.list = append([]NamedHandler{}, l.list...)
return n
}
// Clear clears the handler list.
func (l *HandlerList) Clear() {
l.list = []NamedHandler{}
}
// Len returns the number of handlers in the list.
func (l *HandlerList) Len() int {
return len(l.list)
}
// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
l.list = append(l.list, NamedHandler{"__anonymous", f})
}
// PushFront pushes handler f to the front of the handler list.
func (l *HandlerList) PushFront(f func(*Request)) {
l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
}
// PushBackNamed pushes the named handler n to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
l.list = append(l.list, n)
}
// PushFrontNamed pushes the named handler n to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
l.list = append([]NamedHandler{n}, l.list...)
}
// Remove removes a NamedHandler n
func (l *HandlerList) Remove(n NamedHandler) {
newlist := []NamedHandler{}
for _, m := range l.list {
if m.Name != n.Name {
newlist = append(newlist, m)
}
}
l.list = newlist
}
// Run executes all handlers in the list with a given request object.
func (l *HandlerList) Run(r *Request) {
for i, h := range l.list {
h.Fn(r)
item := HandlerListRunItem{
Index: i, Handler: h, Request: r,
}
if l.AfterEachFn != nil && !l.AfterEachFn(item) {
return
}
}
}
// HandlerListLogItem logs the request handler and the state of the
// request's Error value. Always returns true to continue iterating
// request handlers in a HandlerList.
func HandlerListLogItem(item HandlerListRunItem) bool {
if item.Request.Config.Logger == nil {
return true
}
item.Request.Config.Logger.Log("DEBUG: RequestHandler",
item.Index, item.Handler.Name, item.Request.Error)
return true
}
// HandlerListStopOnError returns false to stop the HandlerList iterating
// over request handlers if Request.Error is not nil. True otherwise
// to continue iterating.
func HandlerListStopOnError(item HandlerListRunItem) bool {
return item.Request.Error == nil
}
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
// header. If the extra parameters are provided they will be added as metadata to the
// name/version pair resulting in the following format.
// "name/version (extra0; extra1; ...)"
// The user agent part will be concatenated with this current request's user agent string.
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
ua := fmt.Sprintf("%s/%s", name, version)
if len(extra) > 0 {
ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
}
return func(r *Request) {
AddToUserAgent(r, ua)
}
}
// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
// The input string will be concatenated with the current request's user agent string.
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
return func(r *Request) {
AddToUserAgent(r, s)
}
}
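A brief sketch of composing a HandlerList by hand with the helpers above; the handler name and user-agent values are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList

	// Stop running later handlers as soon as one sets Request.Error.
	l.AfterEachFn = request.HandlerListStopOnError

	// A named handler built by the user-agent helper; name/version are placeholders.
	l.PushBackNamed(request.NamedHandler{
		Name: "example.UserAgent",
		Fn:   request.MakeAddToUserAgentHandler("my-app", "0.1.0", "linux"),
	})

	// Anonymous handlers can be appended as plain functions.
	l.PushBack(func(r *request.Request) {
		fmt.Println("about to run:", r.Operation.Name)
	})

	fmt.Println("handlers registered:", l.Len())
}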

33
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go

@ -0,0 +1,33 @@
// +build go1.5
package request
import (
"io"
"net/http"
"net/url"
)
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
req := &http.Request{
URL: &url.URL{},
Header: http.Header{},
Close: r.Close,
Body: body,
Host: r.Host,
Method: r.Method,
Proto: r.Proto,
ContentLength: r.ContentLength,
// Cancel will be deprecated in 1.7 and will be replaced with Context
Cancel: r.Cancel,
}
*req.URL = *r.URL
for k, v := range r.Header {
for _, vv := range v {
req.Header.Add(k, vv)
}
}
return req
}

31
vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go

@ -0,0 +1,31 @@
// +build !go1.5
package request
import (
"io"
"net/http"
"net/url"
)
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
req := &http.Request{
URL: &url.URL{},
Header: http.Header{},
Close: r.Close,
Body: body,
Host: r.Host,
Method: r.Method,
Proto: r.Proto,
ContentLength: r.ContentLength,
}
*req.URL = *r.URL
for k, v := range r.Header {
for _, vv := range v {
req.Header.Add(k, vv)
}
}
return req
}

49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go

@ -0,0 +1,49 @@
package request
import (
"io"
"sync"
)
// offsetReader is a thread-safe io.ReadCloser to prevent racing
// with retrying requests
type offsetReader struct {
buf io.ReadSeeker
lock sync.RWMutex
closed bool
}
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
reader := &offsetReader{}
buf.Seek(offset, 0)
reader.buf = buf
return reader
}
// Close is a thread-safe close. Uses the write lock.
func (o *offsetReader) Close() error {
o.lock.Lock()
defer o.lock.Unlock()
o.closed = true
return nil
}
// Read is a thread-safe read using a read lock.
func (o *offsetReader) Read(p []byte) (int, error) {
o.lock.RLock()
defer o.lock.RUnlock()
if o.closed {
return 0, io.EOF
}
return o.buf.Read(p)
}
// CloseAndCopy closes the current offsetReader and returns a new offsetReader
// wrapping the same buffer, seeked to the provided offset.
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
o.Close()
return newOffsetReader(o.buf, offset)
}

329
vendor/github.com/aws/aws-sdk-go/aws/request/request.go

@ -0,0 +1,329 @@
package request
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client/metadata"
)
// A Request is the service request to be made.
type Request struct {
Config aws.Config
ClientInfo metadata.ClientInfo
Handlers Handlers
Retryer
Time time.Time
ExpireTime time.Duration
Operation *Operation
HTTPRequest *http.Request
HTTPResponse *http.Response
Body io.ReadSeeker
BodyStart int64 // offset from beginning of Body that the request body starts
Params interface{}
Error error
Data interface{}
RequestID string
RetryCount int
Retryable *bool
RetryDelay time.Duration
NotHoist bool
SignedHeaderVals http.Header
built bool
}
// An Operation is the service API operation to be made.
type Operation struct {
Name string
HTTPMethod string
HTTPPath string
*Paginator
}
// Paginator keeps track of pagination configuration for an API operation.
type Paginator struct {
InputTokens []string
OutputTokens []string
LimitToken string
TruncationToken string
}
// New returns a new Request pointer for the service API
// operation and parameters.
//
// Params is any value of input parameters to be the request payload.
// Data is a pointer value to an object which the request's response
// payload will be deserialized to.
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
method := operation.HTTPMethod
if method == "" {
method = "POST"
}
p := operation.HTTPPath
if p == "" {
p = "/"
}
httpReq, _ := http.NewRequest(method, "", nil)
var err error
httpReq.URL, err = url.Parse(clientInfo.Endpoint + p)
if err != nil {
httpReq.URL = &url.URL{}
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
}
r := &Request{
Config: cfg,
ClientInfo: clientInfo,
Handlers: handlers.Copy(),
Retryer: retryer,
Time: time.Now(),
ExpireTime: 0,
Operation: operation,
HTTPRequest: httpReq,
Body: nil,
Params: params,
Error: err,
Data: data,
}
r.SetBufferBody([]byte{})
return r
}
// WillRetry returns whether the request can be retried.
func (r *Request) WillRetry() bool {
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
}
// ParamsFilled returns if the request's parameters have been populated
// and the parameters are valid. False is returned if no parameters are
// provided or invalid.
func (r *Request) ParamsFilled() bool {
return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
}
// DataFilled returns true if the request's data for response deserialization
// target has been set and is valid. False is returned if data is not
// set, or is invalid.
func (r *Request) DataFilled() bool {
return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
}
// SetBufferBody will set the request's body bytes that will be sent to
// the service API.
func (r *Request) SetBufferBody(buf []byte) {
r.SetReaderBody(bytes.NewReader(buf))
}
// SetStringBody sets the body of the request to be backed by a string.
func (r *Request) SetStringBody(s string) {
r.SetReaderBody(strings.NewReader(s))
}
// SetReaderBody will set the request's body reader.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
r.HTTPRequest.Body = newOffsetReader(reader, 0)
r.Body = reader
}
// Presign returns the request's signed URL. Error will be returned
// if the signing fails.
func (r *Request) Presign(expireTime time.Duration) (string, error) {
r.ExpireTime = expireTime
r.NotHoist = false
r.Sign()
if r.Error != nil {
return "", r.Error
}
return r.HTTPRequest.URL.String(), nil
}
// PresignRequest behaves just like Presign, but additionally returns the signed
// headers back to the user.
func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
r.ExpireTime = expireTime
r.NotHoist = true
r.Sign()
if r.Error != nil {
return "", nil, r.Error
}
return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
}
func debugLogReqError(r *Request, stage string, retrying bool, err error) {
if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
return
}
retryStr := "not retrying"
if retrying {
retryStr = "will retry"
}
r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
}
// Build will build the request's object so it can be signed and sent
// to the service. Build will also validate all the request's parameters.
// Any additional build Handlers set on this request will be run
// in the order they were set.
//
// The request will only be built once. Multiple calls to build will have
// no effect.
//
// If any Validate or Build errors occur the build will stop and the error
// which occurred will be returned.
func (r *Request) Build() error {
if !r.built {
r.Handlers.Validate.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Request", false, r.Error)
return r.Error
}
r.Handlers.Build.Run(r)
if r.Error != nil {
debugLogReqError(r, "Build Request", false, r.Error)
return r.Error
}
r.built = true
}
return r.Error
}
// Sign will sign the request returning error if errors are encountered.
//
// Sign will build the request prior to signing. All Sign Handlers will
// be executed in the order they were set.
func (r *Request) Sign() error {
r.Build()
if r.Error != nil {
debugLogReqError(r, "Build Request", false, r.Error)
return r.Error
}
r.Handlers.Sign.Run(r)
return r.Error
}
// Send will send the request returning error if errors are encountered.
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
//
// Canceling a request is non-deterministic. If a request has been canceled,
// then the transport will choose, randomly, one of the state channels during
// reads or getting the connection.
//
// readLoop() and getConn(req *Request, cm connectMethod)
// https://github.com/golang/go/blob/master/src/net/http/transport.go
func (r *Request) Send() error {
for {
if aws.BoolValue(r.Retryable) {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
var body io.ReadCloser
if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
body = reader.CloseAndCopy(r.BodyStart)
} else {
if r.Config.Logger != nil {
r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
}
r.Body.Seek(r.BodyStart, 0)
body = ioutil.NopCloser(r.Body)
}
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
// Close the previous response body. Since we are setting a new request to send
// off, this response would otherwise be discarded and its connection leaked.
r.HTTPResponse.Body.Close()
}
}
r.Sign()
if r.Error != nil {
return r.Error
}
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
if strings.Contains(r.Error.Error(), "net/http: request canceled") {
return r.Error
}
err := r.Error
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Send Request", false, r.Error)
return r.Error
}
debugLogReqError(r, "Send Request", true, err)
continue
}
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
err := r.Error
r.Handlers.UnmarshalError.Run(r)
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Response", false, r.Error)
return r.Error
}
debugLogReqError(r, "Validate Response", true, err)
continue
}
r.Handlers.Unmarshal.Run(r)
if r.Error != nil {
err := r.Error
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Unmarshal Response", false, r.Error)
return r.Error
}
debugLogReqError(r, "Unmarshal Response", true, err)
continue
}
break
}
return nil
}
// AddToUserAgent adds the string to the end of the request's current user agent.
func AddToUserAgent(r *Request, s string) {
curUA := r.HTTPRequest.Header.Get("User-Agent")
if len(curUA) > 0 {
s = curUA + " " + s
}
r.HTTPRequest.Header.Set("User-Agent", s)
}
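For illustration, a contrived sketch of the request lifecycle with a hand-built Request and an in-memory Send handler, so no network call is made; the service name, endpoint, and operation are placeholders (generated clients normally construct requests for you):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	// A handler set containing only a fake Send step that fabricates a response.
	var handlers request.Handlers
	handlers.Send.PushBack(func(r *request.Request) {
		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader(nil)),
		}
	})

	cfg := *aws.NewConfig()
	info := metadata.ClientInfo{ServiceName: "example", Endpoint: "https://example.com"}
	op := &request.Operation{Name: "SomeOperation", HTTPMethod: "GET", HTTPPath: "/"}

	r := request.New(cfg, info, handlers, client.DefaultRetryer{NumMaxRetries: 0}, op, nil, nil)

	// Send runs the Validate, Build, Sign, Send, and Unmarshal handlers in order.
	fmt.Println("send error:", r.Send())
}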

104
vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go

@ -0,0 +1,104 @@
package request
import (
"reflect"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
)
//type Paginater interface {
// HasNextPage() bool
// NextPage() *Request
// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
//}
// HasNextPage returns true if this request has more pages of data available.
func (r *Request) HasNextPage() bool {
return len(r.nextPageTokens()) > 0
}
// nextPageTokens returns the tokens to use when asking for the next page of
// data.
func (r *Request) nextPageTokens() []interface{} {
if r.Operation.Paginator == nil {
return nil
}
if r.Operation.TruncationToken != "" {
tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
if len(tr) == 0 {
return nil
}
switch v := tr[0].(type) {
case *bool:
if !aws.BoolValue(v) {
return nil
}
case bool:
if !v {
return nil
}
}
}
tokens := []interface{}{}
tokenAdded := false
for _, outToken := range r.Operation.OutputTokens {
v, _ := awsutil.ValuesAtPath(r.Data, outToken)
if len(v) > 0 {
tokens = append(tokens, v[0])
tokenAdded = true
} else {
tokens = append(tokens, nil)
}
}
if !tokenAdded {
return nil
}
return tokens
}
// NextPage returns a new Request that can be executed to return the next
// page of result data. Call .Send() on this request to execute it.
func (r *Request) NextPage() *Request {
tokens := r.nextPageTokens()
if len(tokens) == 0 {
return nil
}
data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
for i, intok := range nr.Operation.InputTokens {
awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
}
return nr
}
// EachPage iterates over each page of a paginated request object. The fn
// parameter should be a function with the following sample signature:
//
// func(page *T, lastPage bool) bool {
// return true // return false to stop iterating
// }
//
// Where "T" is the structure type matching the output structure of the given
// operation. For example, a request object generated by
// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
// as the structure "T". The lastPage value represents whether the page is
// the last page of data or not. This function should return true to keep
// iterating or false to stop.
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
for page := r; page != nil; page = page.NextPage() {
if err := page.Send(); err != nil {
return err
}
if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
return page.Error
}
}
return nil
}
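A sketch of driving EachPage from a generated request, using DynamoDB ListTables as in the comment above; the service/dynamodb package is assumed and is not part of the vendored files shown here, and the region is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.New(aws.NewConfig().WithRegion("us-west-2")))

	// ListTablesRequest returns a *request.Request with pagination tokens configured.
	req, _ := svc.ListTablesRequest(&dynamodb.ListTablesInput{})

	// EachPage sends the request, then follows NextPage until the tokens run out
	// or the callback returns false.
	err := req.EachPage(func(data interface{}, lastPage bool) bool {
		page := data.(*dynamodb.ListTablesOutput)
		for _, name := range page.TableNames {
			fmt.Println(aws.StringValue(name))
		}
		return true
	})
	if err != nil {
		fmt.Println("pagination failed:", err)
	}
}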

101
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go

@ -0,0 +1,101 @@
package request
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// Retryer is an interface to control retry logic for a given service.
// The default implementation used by most services is the client.DefaultRetryer
// structure, which contains basic retry logic using exponential backoff.
type Retryer interface {
RetryRules(*Request) time.Duration
ShouldRetry(*Request) bool
MaxRetries() int
}
// WithRetryer sets a config Retryer value to the given Config returning it
// for chaining.
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
cfg.Retryer = retryer
return cfg
}
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
}
var throttleCodes = map[string]struct{}{
"ProvisionedThroughputExceededException": {},
"Throttling": {},
"ThrottlingException": {},
"RequestLimitExceeded": {},
"RequestThrottled": {},
"LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
"TooManyRequestsException": {}, // Lambda functions
}
// credsExpiredCodes is a collection of error codes which signify the credentials
// need to be refreshed. Expired tokens require refreshing of credentials, and
// resigning before the request can be retried.
var credsExpiredCodes = map[string]struct{}{
"ExpiredToken": {},
"ExpiredTokenException": {},
"RequestExpired": {}, // EC2 Only
}
func isCodeThrottle(code string) bool {
_, ok := throttleCodes[code]
return ok
}
func isCodeRetryable(code string) bool {
if _, ok := retryableCodes[code]; ok {
return true
}
return isCodeExpiredCreds(code)
}
func isCodeExpiredCreds(code string) bool {
_, ok := credsExpiredCodes[code]
return ok
}
// IsErrorRetryable returns whether the error is retryable, based on its Code.
// Returns false if the request has no Error set.
func (r *Request) IsErrorRetryable() bool {
if r.Error != nil {
if err, ok := r.Error.(awserr.Error); ok {
return isCodeRetryable(err.Code())
}
}
return false
}
// IsErrorThrottle returns whether the error is to be throttled based on its code.
// Returns false if the request has no Error set
func (r *Request) IsErrorThrottle() bool {
if r.Error != nil {
if err, ok := r.Error.(awserr.Error); ok {
return isCodeThrottle(err.Code())
}
}
return false
}
// IsErrorExpired returns whether the error code is a credential expiry error.
// Returns false if the request has no Error set.
func (r *Request) IsErrorExpired() bool {
if r.Error != nil {
if err, ok := r.Error.(awserr.Error); ok {
return isCodeExpiredCreds(err.Code())
}
}
return false
}
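A sketch of a custom Retryer attached through WithRetryer; it embeds client.DefaultRetryer (vendored alongside this package) and only replaces the backoff computation:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

// fixedDelayRetryer reuses the SDK's default retry classification but waits a
// constant delay between attempts instead of exponential backoff.
type fixedDelayRetryer struct {
	client.DefaultRetryer
	Delay time.Duration
}

// RetryRules overrides only the backoff computation.
func (r fixedDelayRetryer) RetryRules(*request.Request) time.Duration {
	return r.Delay
}

func main() {
	retryer := fixedDelayRetryer{
		DefaultRetryer: client.DefaultRetryer{NumMaxRetries: 3},
		Delay:          500 * time.Millisecond,
	}

	// WithRetryer attaches the retryer to a config for any service client to use.
	cfg := request.WithRetryer(aws.NewConfig(), retryer)
	fmt.Println("retryer set:", cfg.Retryer != nil, "max retries:", retryer.MaxRetries())
}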

234
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go

@ -0,0 +1,234 @@
package request
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
)
const (
// InvalidParameterErrCode is the error code for invalid parameters errors
InvalidParameterErrCode = "InvalidParameter"
// ParamRequiredErrCode is the error code for required parameter errors
ParamRequiredErrCode = "ParamRequiredError"
// ParamMinValueErrCode is the error code for fields with too low of a
// number value.
ParamMinValueErrCode = "ParamMinValueError"
// ParamMinLenErrCode is the error code for fields without enough elements.
ParamMinLenErrCode = "ParamMinLenError"
)
// Validator provides a way for types to perform validation logic on their
// input values that external code can use to determine if a type's values
// are valid.
type Validator interface {
Validate() error
}
// An ErrInvalidParams provides wrapping of invalid parameter errors found when
// validating API operation input parameters.
type ErrInvalidParams struct {
// Context is the base context of the invalid parameter group.
Context string
errs []ErrInvalidParam
}
// Add adds a new invalid parameter error to the collection of invalid
// parameters. The context of the invalid parameter will be updated to reflect
// this collection.
func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
err.SetContext(e.Context)
e.errs = append(e.errs, err)
}
// AddNested adds the invalid parameter errors from another ErrInvalidParams
// value into this collection. The nested errors will have their nested and base
// contexts updated to reflect the merging.
//
// Use for nested validation errors.
func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
for _, err := range nested.errs {
err.SetContext(e.Context)
err.AddNestedContext(nestedCtx)
e.errs = append(e.errs, err)
}
}
// Len returns the number of invalid parameter errors
func (e ErrInvalidParams) Len() int {
return len(e.errs)
}
// Code returns the code of the error
func (e ErrInvalidParams) Code() string {
return InvalidParameterErrCode
}
// Message returns the message of the error
func (e ErrInvalidParams) Message() string {
return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
}
// Error returns the string formatted form of the invalid parameters.
func (e ErrInvalidParams) Error() string {
w := &bytes.Buffer{}
fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
for _, err := range e.errs {
fmt.Fprintf(w, "- %s\n", err.Message())
}
return w.String()
}
// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
func (e ErrInvalidParams) OrigErr() error {
return awserr.NewBatchError(
InvalidParameterErrCode, e.Message(), e.OrigErrs())
}
// OrigErrs returns a slice of the invalid parameters
func (e ErrInvalidParams) OrigErrs() []error {
errs := make([]error, len(e.errs))
for i := 0; i < len(errs); i++ {
errs[i] = e.errs[i]
}
return errs
}
// An ErrInvalidParam represents an invalid parameter error type.
type ErrInvalidParam interface {
awserr.Error
// Field name the error occurred on.
Field() string
// SetContext updates the context of the error.
SetContext(string)
// AddNestedContext updates the error's context to include a nested level.
AddNestedContext(string)
}
type errInvalidParam struct {
context string
nestedContext string
field string
code string
msg string
}
// Code returns the error code for the type of invalid parameter.
func (e *errInvalidParam) Code() string {
return e.code
}
// Message returns the reason the parameter was invalid, and its context.
func (e *errInvalidParam) Message() string {
return fmt.Sprintf("%s, %s.", e.msg, e.Field())
}
// Error returns the string version of the invalid parameter error.
func (e *errInvalidParam) Error() string {
return fmt.Sprintf("%s: %s", e.code, e.Message())
}
// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
func (e *errInvalidParam) OrigErr() error {
return nil
}
// Field returns the field and the context in which the error occurred.
func (e *errInvalidParam) Field() string {
field := e.context
if len(field) > 0 {
field += "."
}
if len(e.nestedContext) > 0 {
field += fmt.Sprintf("%s.", e.nestedContext)
}
field += e.field
return field
}
// SetContext updates the base context of the error.
func (e *errInvalidParam) SetContext(ctx string) {
e.context = ctx
}
// AddNestedContext prepends a context to the field's path.
func (e *errInvalidParam) AddNestedContext(ctx string) {
if len(e.nestedContext) == 0 {
e.nestedContext = ctx
} else {
e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
}
}
// An ErrParamRequired represents a required parameter error.
type ErrParamRequired struct {
errInvalidParam
}
// NewErrParamRequired creates a new required parameter error.
func NewErrParamRequired(field string) *ErrParamRequired {
return &ErrParamRequired{
errInvalidParam{
code: ParamRequiredErrCode,
field: field,
msg: fmt.Sprintf("missing required field"),
},
}
}
// An ErrParamMinValue represents a minimum value parameter error.
type ErrParamMinValue struct {
errInvalidParam
min float64
}
// NewErrParamMinValue creates a new minimum value parameter error.
func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
return &ErrParamMinValue{
errInvalidParam: errInvalidParam{
code: ParamMinValueErrCode,
field: field,
msg: fmt.Sprintf("minimum field value of %v", min),
},
min: min,
}
}
// MinValue returns the field's required minimum value.
//
// float64 is returned for both int and float min values.
func (e *ErrParamMinValue) MinValue() float64 {
return e.min
}
// An ErrParamMinLen represents a minimum length parameter error.
type ErrParamMinLen struct {
errInvalidParam
min int
}
// NewErrParamMinLen creates a new minimum length parameter error.
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
return &ErrParamMinLen{
errInvalidParam: errInvalidParam{
code: ParamMinLenErrCode,
field: field,
msg: fmt.Sprintf("minimum field size of %v", min),
},
min: min,
}
}
// MinLen returns the field's required minimum length.
func (e *ErrParamMinLen) MinLen() int {
return e.min
}
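A sketch of how an input type can use these error types in a Validate method, mirroring the SDK's generated code; MyInput and its fields are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

// MyInput is a hypothetical operation input used to illustrate validation.
type MyInput struct {
	Name  *string
	Items []*string
}

// Validate mirrors the pattern used by the SDK's generated input types.
func (s *MyInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "MyInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Items != nil && len(s.Items) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Items", 1))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	in := &MyInput{Items: []*string{}}
	fmt.Println(in.Validate())
}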

120
vendor/github.com/aws/aws-sdk-go/aws/session/session.go

@ -0,0 +1,120 @@
// Package session provides a way to create service clients with shared configuration
// and handlers.
//
// Generally this package should be used instead of the `defaults` package.
//
// A session should be used to share configurations and request handlers between multiple
// service clients. When service clients need specific configuration aws.Config can be
// used to provide additional configuration directly to the service client.
package session
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/endpoints"
)
// A Session provides a central location to create service clients from and
// store configurations and request handlers for those services.
//
// A Session is safe to use for creating service clients concurrently, but it is
// not safe to mutate the Session concurrently.
type Session struct {
Config *aws.Config
Handlers request.Handlers
}
// New creates a new Session, merging in the provided Configs
// on top of the SDK's default configurations. Once the session is created it
// can be mutated to modify Configs or Handlers. The session is safe to be read
// concurrently, but it should not be written to concurrently.
//
// Example:
// // Create a session with the default config and request handlers.
// sess := session.New()
//
// // Create a session with a custom region
// sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
//
// // Create a session, and add additional handlers for all service
// // clients created with the session to inherit. Adds logging handler.
// sess := session.New()
// sess.Handlers.Send.PushFront(func(r *request.Request) {
// // Log every request made and its payload
// logger.Printf("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
// })
//
// // Create a S3 client instance from a session
// sess := session.New()
// svc := s3.New(sess)
func New(cfgs ...*aws.Config) *Session {
cfg := defaults.Config()
handlers := defaults.Handlers()
// Apply the passed in configs so the configuration can be applied to the
// default credential chain
cfg.MergeIn(cfgs...)
cfg.Credentials = defaults.CredChain(cfg, handlers)
// Reapply any passed in configs to override credentials if set
cfg.MergeIn(cfgs...)
s := &Session{
Config: cfg,
Handlers: handlers,
}
initHandlers(s)
return s
}
func initHandlers(s *Session) {
// Add the Validate parameter handler if it is not disabled.
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
if !aws.BoolValue(s.Config.DisableParamValidation) {
s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
}
}
// Copy creates and returns a copy of the current session, copying the config
// and handlers. If any additional configs are provided they will be merged
// on top of the session's copied config.
//
// Example:
// // Create a copy of the current session, configured for the us-west-2 region.
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
Handlers: s.Handlers.Copy(),
}
initHandlers(newSession)
return newSession
}
// ClientConfig satisfies the client.ConfigProvider interface and is used to
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
//
// Example:
// sess := session.New()
// s3.New(sess)
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
s = s.Copy(cfgs...)
endpoint, signingRegion := endpoints.NormalizeEndpoint(
aws.StringValue(s.Config.Endpoint), serviceName,
aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
return client.Config{
Config: s.Config,
Handlers: s.Handlers,
Endpoint: endpoint,
SigningRegion: signingRegion,
}
}

106
vendor/github.com/aws/aws-sdk-go/aws/types.go

@ -0,0 +1,106 @@
package aws
import (
"io"
"sync"
)
// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
return ReaderSeekerCloser{r}
}
// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
// io.Closer interfaces to the underlying object if they are available.
type ReaderSeekerCloser struct {
r io.Reader
}
// Read reads from the reader up to the size of p. The number of bytes read, and
// an error if one occurred, will be returned.
//
// If the underlying reader is not an io.Reader, zero bytes read and a nil error will be returned.
//
// Performs the same functionality as io.Reader Read
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
switch t := r.r.(type) {
case io.Reader:
return t.Read(p)
}
return 0, nil
}
// Seek sets the offset for the next Read to offset, interpreted according to
// whence: 0 means relative to the origin of the file, 1 means relative to the
// current offset, and 2 means relative to the end. Seek returns the new offset
// and an error, if any.
//
// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
switch t := r.r.(type) {
case io.Seeker:
return t.Seek(offset, whence)
}
return int64(0), nil
}
// Close closes the ReaderSeekerCloser.
//
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
func (r ReaderSeekerCloser) Close() error {
switch t := r.r.(type) {
case io.Closer:
return t.Close()
}
return nil
}
// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
// Can be used with the s3manager.Downloader to download content to a buffer
// in memory. Safe to use concurrently.
type WriteAtBuffer struct {
buf []byte
m sync.Mutex
// GrowthCoeff defines the growth rate of the internal buffer. By
// default, the growth rate is 1, where expanding the internal
// buffer will allocate only enough capacity to fit the new expected
// length.
GrowthCoeff float64
}
// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
// provided by buf.
func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
return &WriteAtBuffer{buf: buf}
}
// WriteAt writes a slice of bytes to the buffer starting at the position provided.
// The number of bytes written will be returned, or an error. Can overwrite previously
// written slices if the writes overlap.
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
pLen := len(p)
expLen := pos + int64(pLen)
b.m.Lock()
defer b.m.Unlock()
if int64(len(b.buf)) < expLen {
if int64(cap(b.buf)) < expLen {
if b.GrowthCoeff < 1 {
b.GrowthCoeff = 1
}
newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
copy(newBuf, b.buf)
b.buf = newBuf
}
b.buf = b.buf[:expLen]
}
copy(b.buf[pos:], p)
return pLen, nil
}
// Bytes returns a slice of bytes written to the buffer.
func (b *WriteAtBuffer) Bytes() []byte {
b.m.Lock()
defer b.m.Unlock()
return b.buf[:len(b.buf):len(b.buf)]
}
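A tiny sketch of WriteAtBuffer on its own; out-of-order WriteAt calls land in a growable in-memory slice, which is what s3manager.Downloader relies on:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Start with an empty backing slice; the buffer grows as needed.
	buf := aws.NewWriteAtBuffer([]byte{})

	// Out-of-order, overlapping-safe writes.
	buf.WriteAt([]byte("world"), 6)
	buf.WriteAt([]byte("hello "), 0)

	fmt.Printf("%q\n", string(buf.Bytes())) // "hello world"
}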

8
vendor/github.com/aws/aws-sdk-go/aws/version.go

@ -0,0 +1,8 @@
// Package aws provides core functionality for making requests to AWS services.
package aws
// SDKName is the name of this AWS SDK
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.1.35"

4
vendor/github.com/aws/aws-sdk-go/private/README.md

@ -0,0 +1,4 @@
## AWS SDK for Go Private packages ##
`private` is a collection of packages used internally by the SDK, and is subject to breaking changes. This package is not `internal` so that, if you really need its functionality and understand that breaking changes will be made, you are still able to use it.
These packages will be refactored in the future so that the API generator and model parsers are exposed cleanly on their own, making it easier for you to generate your own code based on the API models.

65
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go

@ -0,0 +1,65 @@
// Package endpoints validates regional endpoints for services.
package endpoints
//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go
import (
"fmt"
"regexp"
"strings"
)
// NormalizeEndpoint takes an endpoint and service API information to return a
// normalized endpoint and signing region. If the endpoint is an empty string,
// the service name and region will be used to look up the service's API endpoint.
// If the endpoint is provided, the scheme will be added if it is not present.
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
if endpoint == "" {
return EndpointForRegion(serviceName, region, disableSSL)
}
return AddScheme(endpoint, disableSSL), ""
}
// EndpointForRegion returns an endpoint and its signing region for a service and region.
// If the service and region pair is not found, endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
derivedKeys := []string{
region + "/" + svcName,
region + "/*",
"*/" + svcName,
"*/*",
}
for _, key := range derivedKeys {
if val, ok := endpointsMap.Endpoints[key]; ok {
ep := val.Endpoint
ep = strings.Replace(ep, "{region}", region, -1)
ep = strings.Replace(ep, "{service}", svcName, -1)
endpoint = ep
signingRegion = val.SigningRegion
break
}
}
return AddScheme(endpoint, disableSSL), signingRegion
}
// Regular expression to determine if the endpoint string is prefixed with a scheme.
var schemeRE = regexp.MustCompile("^([^:]+)://")
// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
func AddScheme(endpoint string, disableSSL bool) string {
if endpoint != "" && !schemeRE.MatchString(endpoint) {
scheme := "https"
if disableSSL {
scheme = "http"
}
endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
}
return endpoint
}

75
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json

@ -0,0 +1,75 @@
{
"version": 2,
"endpoints": {
"*/*": {
"endpoint": "{service}.{region}.amazonaws.com"
},
"cn-north-1/*": {
"endpoint": "{service}.{region}.amazonaws.com.cn",
"signatureVersion": "v4"
},
"cn-north-1/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"us-gov-west-1/iam": {
"endpoint": "iam.us-gov.amazonaws.com"
},
"us-gov-west-1/sts": {
"endpoint": "sts.us-gov-west-1.amazonaws.com"
},
"us-gov-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"us-gov-west-1/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"*/cloudfront": {
"endpoint": "cloudfront.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/cloudsearchdomain": {
"endpoint": "",
"signingRegion": "us-east-1"
},
"*/data.iot": {
"endpoint": "",
"signingRegion": "us-east-1"
},
"*/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"*/iam": {
"endpoint": "iam.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/importexport": {
"endpoint": "importexport.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/route53": {
"endpoint": "route53.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/sts": {
"endpoint": "sts.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/waf": {
"endpoint": "waf.amazonaws.com",
"signingRegion": "us-east-1"
},
"us-east-1/sdb": {
"endpoint": "sdb.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"us-east-1/s3": {
"endpoint": "s3.amazonaws.com"
},
"eu-central-1/s3": {
"endpoint": "{service}.{region}.amazonaws.com"
}
}
}

88
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go

@ -0,0 +1,88 @@
package endpoints
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
type endpointStruct struct {
Version int
Endpoints map[string]endpointEntry
}
type endpointEntry struct {
Endpoint string
SigningRegion string
}
var endpointsMap = endpointStruct{
Version: 2,
Endpoints: map[string]endpointEntry{
"*/*": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"*/cloudfront": {
Endpoint: "cloudfront.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/cloudsearchdomain": {
Endpoint: "",
SigningRegion: "us-east-1",
},
"*/data.iot": {
Endpoint: "",
SigningRegion: "us-east-1",
},
"*/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"*/iam": {
Endpoint: "iam.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/importexport": {
Endpoint: "importexport.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/route53": {
Endpoint: "route53.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"*/sts": {
Endpoint: "sts.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/waf": {
Endpoint: "waf.amazonaws.com",
SigningRegion: "us-east-1",
},
"cn-north-1/*": {
Endpoint: "{service}.{region}.amazonaws.com.cn",
},
"cn-north-1/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"eu-central-1/s3": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"us-east-1/s3": {
Endpoint: "s3.amazonaws.com",
},
"us-east-1/sdb": {
Endpoint: "sdb.amazonaws.com",
SigningRegion: "us-east-1",
},
"us-gov-west-1/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"us-gov-west-1/iam": {
Endpoint: "iam.us-gov.amazonaws.com",
},
"us-gov-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-gov-west-1/sts": {
Endpoint: "sts.us-gov-west-1.amazonaws.com",
},
},
}

75
vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go

@ -0,0 +1,75 @@
package protocol
import (
"crypto/rand"
"fmt"
"reflect"
)
// RandReader is the random reader the protocol package will use to read
// random bytes from. This is exported for testing, and should not be used.
var RandReader = rand.Reader
const idempotencyTokenFillTag = `idempotencyToken`
// CanSetIdempotencyToken returns true if the struct field should be
// automatically populated with an idempotency token.
//
// Only *string and string type fields that are tagged with idempotencyToken
// which are not already set can be auto filled.
func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
switch u := v.Interface().(type) {
// To auto fill an Idempotency token the field must be a string,
// tagged for auto fill, and have a zero value.
case *string:
return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
case string:
return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
}
return false
}
// GetIdempotencyToken returns a randomly generated idempotency token.
func GetIdempotencyToken() string {
b := make([]byte, 16)
RandReader.Read(b)
return UUIDVersion4(b)
}
// SetIdempotencyToken will set the value provided with an idempotency token,
// given that the value can be set. Will panic if the value is not settable.
func SetIdempotencyToken(v reflect.Value) {
if v.Kind() == reflect.Ptr {
if v.IsNil() && v.CanSet() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
v = reflect.Indirect(v)
if !v.CanSet() {
panic(fmt.Sprintf("unable to set idempotency token %v", v))
}
b := make([]byte, 16)
_, err := rand.Read(b)
if err != nil {
// TODO handle error
return
}
v.Set(reflect.ValueOf(UUIDVersion4(b)))
}
// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
func UUIDVersion4(u []byte) string {
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
// 13th character is "4"
u[6] = (u[6] | 0x40) & 0x4F
// 17th character is "8", "9", "a", or "b"
u[8] = (u[8] | 0x80) & 0xBF
return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
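
The helpers above are what generated marshalers use to auto-fill idempotency tokens. A minimal, self-contained sketch of that flow follows; the createInput type, its tag, and the assumption that the vendored protocol package is importable are illustrative only.

// Illustrative sketch, assuming the vendored protocol package is resolvable from the
// consuming module: auto-filling a *string field tagged `idempotencyToken:"true"`.
package main

import (
	"fmt"
	"reflect"

	"github.com/aws/aws-sdk-go/private/protocol"
)

type createInput struct {
	ClientToken *string `idempotencyToken:"true"` // invented field for this example
}

func main() {
	in := &createInput{}
	v := reflect.ValueOf(in).Elem()
	field := v.Type().Field(0)
	// The token is only filled when the field is tagged and still unset.
	if protocol.CanSetIdempotencyToken(v.Field(0), field) {
		protocol.SetIdempotencyToken(v.Field(0))
	}
	fmt.Println(*in.ClientToken) // prints a random version-4 UUID per run
}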

36
vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go

@ -0,0 +1,36 @@
// Package query provides serialisation of AWS query requests and responses.
package query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)
// BuildHandler is a named request handler for building query protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
// Build builds a request for an AWS Query service.
func Build(r *request.Request) {
body := url.Values{
"Action": {r.Operation.Name},
"Version": {r.ClientInfo.APIVersion},
}
if err := queryutil.Parse(body, r.Params, false); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
return
}
if r.ExpireTime == 0 {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
} else { // This is a pre-signed request
r.HTTPRequest.Method = "GET"
r.HTTPRequest.URL.RawQuery = body.Encode()
}
}

230
vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go

@ -0,0 +1,230 @@
package queryutil
import (
"encoding/base64"
"fmt"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/private/protocol"
)
// Parse parses an object i and fills a url.Values object. The isEC2 flag
// indicates if this is the EC2 Query sub-protocol.
func Parse(body url.Values, i interface{}, isEC2 bool) error {
q := queryParser{isEC2: isEC2}
return q.parseValue(body, reflect.ValueOf(i), "", "")
}
func elemOf(value reflect.Value) reflect.Value {
for value.Kind() == reflect.Ptr {
value = value.Elem()
}
return value
}
type queryParser struct {
isEC2 bool
}
func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
value = elemOf(value)
// no need to handle zero values
if !value.IsValid() {
return nil
}
t := tag.Get("type")
if t == "" {
switch value.Kind() {
case reflect.Struct:
t = "structure"
case reflect.Slice:
t = "list"
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
return q.parseStruct(v, value, prefix)
case "list":
return q.parseList(v, value, prefix, tag)
case "map":
return q.parseMap(v, value, prefix, tag)
default:
return q.parseScalar(v, value, prefix, tag)
}
}
func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
if !value.IsValid() {
return nil
}
t := value.Type()
for i := 0; i < value.NumField(); i++ {
elemValue := elemOf(value.Field(i))
field := t.Field(i)
if field.PkgPath != "" {
continue // ignore unexported fields
}
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
token := protocol.GetIdempotencyToken()
elemValue = reflect.ValueOf(token)
}
var name string
if q.isEC2 {
name = field.Tag.Get("queryName")
}
if name == "" {
if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
name = field.Tag.Get("locationNameList")
} else if locName := field.Tag.Get("locationName"); locName != "" {
name = locName
}
if name != "" && q.isEC2 {
name = strings.ToUpper(name[0:1]) + name[1:]
}
}
if name == "" {
name = field.Name
}
if prefix != "" {
name = prefix + "." + name
}
if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
// If it's empty, generate an empty value
if !value.IsNil() && value.Len() == 0 {
v.Set(prefix, "")
return nil
}
// check for unflattened list member
if !q.isEC2 && tag.Get("flattened") == "" {
prefix += ".member"
}
for i := 0; i < value.Len(); i++ {
slicePrefix := prefix
if slicePrefix == "" {
slicePrefix = strconv.Itoa(i + 1)
} else {
slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
}
if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
// If it's empty, generate an empty value
if !value.IsNil() && value.Len() == 0 {
v.Set(prefix, "")
return nil
}
// check for unflattened list member
if !q.isEC2 && tag.Get("flattened") == "" {
prefix += ".entry"
}
// sort keys for improved serialization consistency.
// this is not strictly necessary for protocol support.
mapKeyValues := value.MapKeys()
mapKeys := map[string]reflect.Value{}
mapKeyNames := make([]string, len(mapKeyValues))
for i, mapKey := range mapKeyValues {
name := mapKey.String()
mapKeys[name] = mapKey
mapKeyNames[i] = name
}
sort.Strings(mapKeyNames)
for i, mapKeyName := range mapKeyNames {
mapKey := mapKeys[mapKeyName]
mapValue := value.MapIndex(mapKey)
kname := tag.Get("locationNameKey")
if kname == "" {
kname = "key"
}
vname := tag.Get("locationNameValue")
if vname == "" {
vname = "value"
}
// serialize key
var keyName string
if prefix == "" {
keyName = strconv.Itoa(i+1) + "." + kname
} else {
keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
}
if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
return err
}
// serialize value
var valueName string
if prefix == "" {
valueName = strconv.Itoa(i+1) + "." + vname
} else {
valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
}
if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
switch value := r.Interface().(type) {
case string:
v.Set(name, value)
case []byte:
if !r.IsNil() {
v.Set(name, base64.StdEncoding.EncodeToString(value))
}
case bool:
v.Set(name, strconv.FormatBool(value))
case int64:
v.Set(name, strconv.FormatInt(value, 10))
case int:
v.Set(name, strconv.Itoa(value))
case float64:
v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
case float32:
v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
v.Set(name, value.UTC().Format(ISO8601UTC))
default:
return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
}
return nil
}
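
To see what queryutil.Parse produces, the sketch below runs it over an invented struct; the tagFilter type and its tags are made up, and it assumes the vendored queryutil package is importable. As in parseList above, unflattened lists in the non-EC2 query protocol gain a ".member" infix.

// Illustrative sketch: flattening a tagged struct with a list into query parameters.
package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

type tagFilter struct {
	Name   *string   `locationName:"Name" type:"string"`
	Values []*string `locationName:"Values" type:"list"`
}

func main() {
	name, v1, v2 := "env", "prod", "staging"
	body := url.Values{}
	// isEC2=false: unflattened lists are serialized as "Values.member.N".
	if err := queryutil.Parse(body, &tagFilter{Name: &name, Values: []*string{&v1, &v2}}, false); err != nil {
		panic(err)
	}
	fmt.Println(body.Encode())
	// -> Name=env&Values.member.1=prod&Values.member.2=staging
}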

35
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go

@ -0,0 +1,35 @@
package query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
import (
"encoding/xml"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
// Unmarshal unmarshals a response for an AWS Query service.
func Unmarshal(r *request.Request) {
defer r.HTTPResponse.Body.Close()
if r.DataFilled() {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
return
}
}
}
// UnmarshalMeta unmarshals header response values for an AWS Query service.
func UnmarshalMeta(r *request.Request) {
r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
}

66
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go

@ -0,0 +1,66 @@
package query
import (
"encoding/xml"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
type xmlErrorResponse struct {
XMLName xml.Name `xml:"ErrorResponse"`
Code string `xml:"Error>Code"`
Message string `xml:"Error>Message"`
RequestID string `xml:"RequestId"`
}
type xmlServiceUnavailableResponse struct {
XMLName xml.Name `xml:"ServiceUnavailableException"`
}
// UnmarshalErrorHandler is a named request handler to unmarshal request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
// UnmarshalError unmarshals an error response for an AWS Query service.
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
return
}
// First check for specific error
resp := xmlErrorResponse{}
decodeErr := xml.Unmarshal(bodyBytes, &resp)
if decodeErr == nil {
reqID := resp.RequestID
if reqID == "" {
reqID = r.RequestID
}
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
reqID,
)
return
}
// Check for unhandled error
servUnavailResp := xmlServiceUnavailableResponse{}
unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
if unavailErr == nil {
r.Error = awserr.NewRequestFailure(
awserr.New("ServiceUnavailableException", "service is unavailable", nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Failed to retrieve any error message from the response body
r.Error = awserr.New("SerializationError",
"failed to decode query XML error response", decodeErr)
}
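
For reference, this is the XML shape UnmarshalError expects in a standard Query error body. The sketch below decodes an example document into a struct mirroring xmlErrorResponse; the error code, message, and request ID values are invented.

// Illustrative sketch: decoding the Query protocol's ErrorResponse XML shape.
package main

import (
	"encoding/xml"
	"fmt"
)

type errorResponse struct {
	XMLName   xml.Name `xml:"ErrorResponse"`
	Code      string   `xml:"Error>Code"`
	Message   string   `xml:"Error>Message"`
	RequestID string   `xml:"RequestId"`
}

func main() {
	body := []byte(`<ErrorResponse>
  <Error><Code>AccessDenied</Code><Message>Not authorized</Message></Error>
  <RequestId>abc-123</RequestId>
</ErrorResponse>`)
	var resp errorResponse
	if err := xml.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Code, resp.Message, resp.RequestID) // AccessDenied Not authorized abc-123
}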

256
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go

@ -0,0 +1,256 @@
// Package rest provides RESTful serialization of AWS requests and responses.
package rest
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"net/http"
"net/url"
"path"
"reflect"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// RFC822 returns an RFC822 formatted timestamp for AWS protocols
const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
// Whether the byte value can be sent without escaping in AWS URLs
var noEscape [256]bool
var errValueNotSet = fmt.Errorf("value not set")
func init() {
for i := 0; i < len(noEscape); i++ {
// AWS expects every character except these to be escaped
noEscape[i] = (i >= 'A' && i <= 'Z') ||
(i >= 'a' && i <= 'z') ||
(i >= '0' && i <= '9') ||
i == '-' ||
i == '.' ||
i == '_' ||
i == '~'
}
}
// BuildHandler is a named request handler for building rest protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
// Build builds the REST component of a service request.
func Build(r *request.Request) {
if r.ParamsFilled() {
v := reflect.ValueOf(r.Params).Elem()
buildLocationElements(r, v)
buildBody(r, v)
}
}
func buildLocationElements(r *request.Request, v reflect.Value) {
query := r.HTTPRequest.URL.Query()
for i := 0; i < v.NumField(); i++ {
m := v.Field(i)
if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
continue
}
if m.IsValid() {
field := v.Type().Field(i)
name := field.Tag.Get("locationName")
if name == "" {
name = field.Name
}
if m.Kind() == reflect.Ptr {
m = m.Elem()
}
if !m.IsValid() {
continue
}
var err error
switch field.Tag.Get("location") {
case "headers": // header maps
err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName"))
case "header":
err = buildHeader(&r.HTTPRequest.Header, m, name)
case "uri":
err = buildURI(r.HTTPRequest.URL, m, name)
case "querystring":
err = buildQueryString(query, m, name)
}
r.Error = err
}
if r.Error != nil {
return
}
}
r.HTTPRequest.URL.RawQuery = query.Encode()
updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
}
func buildBody(r *request.Request, v reflect.Value) {
if field, ok := v.Type().FieldByName("_"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
pfield, _ := v.Type().FieldByName(payloadName)
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
payload := reflect.Indirect(v.FieldByName(payloadName))
if payload.IsValid() && payload.Interface() != nil {
switch reader := payload.Interface().(type) {
case io.ReadSeeker:
r.SetReaderBody(reader)
case []byte:
r.SetBufferBody(reader)
case string:
r.SetStringBody(reader)
default:
r.Error = awserr.New("SerializationError",
"failed to encode REST request",
fmt.Errorf("unknown payload type %s", payload.Type()))
}
}
}
}
}
}
func buildHeader(header *http.Header, v reflect.Value, name string) error {
str, err := convertType(v)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
header.Add(name, str)
return nil
}
func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error {
for _, key := range v.MapKeys() {
str, err := convertType(v.MapIndex(key))
if err == errValueNotSet {
continue
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
header.Add(prefix+key.String(), str)
}
return nil
}
func buildURI(u *url.URL, v reflect.Value, name string) error {
value, err := convertType(v)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
uri := u.Path
uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1)
uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1)
u.Path = uri
return nil
}
func buildQueryString(query url.Values, v reflect.Value, name string) error {
switch value := v.Interface().(type) {
case []*string:
for _, item := range value {
query.Add(name, *item)
}
case map[string]*string:
for key, item := range value {
query.Add(key, *item)
}
case map[string][]*string:
for key, items := range value {
for _, item := range items {
query.Add(key, *item)
}
}
default:
str, err := convertType(v)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
query.Set(name, str)
}
return nil
}
func updatePath(url *url.URL, urlPath string) {
scheme, query := url.Scheme, url.RawQuery
hasSlash := strings.HasSuffix(urlPath, "/")
// clean up path
urlPath = path.Clean(urlPath)
if hasSlash && !strings.HasSuffix(urlPath, "/") {
urlPath += "/"
}
// get formatted URL minus scheme so we can build this into Opaque
url.Scheme, url.Path, url.RawQuery = "", "", ""
s := url.String()
url.Scheme = scheme
url.RawQuery = query
// build opaque URI
url.Opaque = s + urlPath
}
// EscapePath escapes part of a URL path in Amazon style
func EscapePath(path string, encodeSep bool) string {
var buf bytes.Buffer
for i := 0; i < len(path); i++ {
c := path[i]
if noEscape[c] || (c == '/' && !encodeSep) {
buf.WriteByte(c)
} else {
fmt.Fprintf(&buf, "%%%02X", c)
}
}
return buf.String()
}
func convertType(v reflect.Value) (string, error) {
v = reflect.Indirect(v)
if !v.IsValid() {
return "", errValueNotSet
}
var str string
switch value := v.Interface().(type) {
case string:
str = value
case []byte:
str = base64.StdEncoding.EncodeToString(value)
case bool:
str = strconv.FormatBool(value)
case int64:
str = strconv.FormatInt(value, 10)
case float64:
str = strconv.FormatFloat(value, 'f', -1, 64)
case time.Time:
str = value.UTC().Format(RFC822)
default:
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
return "", err
}
return str, nil
}
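
EscapePath is the only exported helper in this file; the sketch below shows its effect on an invented object key, assuming the vendored rest package is importable. With encodeSep=false the path separators survive, which is how buildURI fills greedy "{name+}" placeholders.

// Illustrative sketch of Amazon-style path escaping.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	key := "photos/2016 January/cat.png" // invented object key
	fmt.Println(rest.EscapePath(key, true))  // photos%2F2016%20January%2Fcat.png
	fmt.Println(rest.EscapePath(key, false)) // photos/2016%20January/cat.png
}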

45
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go

@ -0,0 +1,45 @@
package rest
import "reflect"
// PayloadMember returns the payload field member of i if there is one, or nil.
func PayloadMember(i interface{}) interface{} {
if i == nil {
return nil
}
v := reflect.ValueOf(i).Elem()
if !v.IsValid() {
return nil
}
if field, ok := v.Type().FieldByName("_"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
field, _ := v.Type().FieldByName(payloadName)
if field.Tag.Get("type") != "structure" {
return nil
}
payload := v.FieldByName(payloadName)
if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
return payload.Interface()
}
}
}
return nil
}
// PayloadType returns the type of a payload field member of i if there is one, or "".
func PayloadType(i interface{}) string {
v := reflect.Indirect(reflect.ValueOf(i))
if !v.IsValid() {
return ""
}
if field, ok := v.Type().FieldByName("_"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
if member, ok := v.Type().FieldByName(payloadName); ok {
return member.Tag.Get("type")
}
}
}
return ""
}

198
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go

@ -0,0 +1,198 @@
package rest
import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
// Unmarshal unmarshals the REST component of a response in a REST service.
func Unmarshal(r *request.Request) {
if r.DataFilled() {
v := reflect.Indirect(reflect.ValueOf(r.Data))
unmarshalBody(r, v)
}
}
// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
func UnmarshalMeta(r *request.Request) {
r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
if r.RequestID == "" {
// Alternative version of request id in the header
r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
}
if r.DataFilled() {
v := reflect.Indirect(reflect.ValueOf(r.Data))
unmarshalLocationElements(r, v)
}
}
func unmarshalBody(r *request.Request, v reflect.Value) {
if field, ok := v.Type().FieldByName("_"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
pfield, _ := v.Type().FieldByName(payloadName)
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
payload := v.FieldByName(payloadName)
if payload.IsValid() {
switch payload.Interface().(type) {
case []byte:
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
} else {
payload.Set(reflect.ValueOf(b))
}
case *string:
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
} else {
str := string(b)
payload.Set(reflect.ValueOf(&str))
}
default:
switch payload.Type().String() {
case "io.ReadSeeker":
payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
case "aws.ReadSeekCloser", "io.ReadCloser":
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
default:
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
defer r.HTTPResponse.Body.Close()
r.Error = awserr.New("SerializationError",
"failed to decode REST response",
fmt.Errorf("unknown payload type %s", payload.Type()))
}
}
}
}
}
}
}
func unmarshalLocationElements(r *request.Request, v reflect.Value) {
for i := 0; i < v.NumField(); i++ {
m, field := v.Field(i), v.Type().Field(i)
if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
continue
}
if m.IsValid() {
name := field.Tag.Get("locationName")
if name == "" {
name = field.Name
}
switch field.Tag.Get("location") {
case "statusCode":
unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
case "header":
err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
break
}
case "headers":
prefix := field.Tag.Get("locationName")
err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
break
}
}
}
if r.Error != nil {
return
}
}
}
func unmarshalStatusCode(v reflect.Value, statusCode int) {
if !v.IsValid() {
return
}
switch v.Interface().(type) {
case *int64:
s := int64(statusCode)
v.Set(reflect.ValueOf(&s))
}
}
func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
switch r.Interface().(type) {
case map[string]*string: // we only support string map value types
out := map[string]*string{}
for k, v := range headers {
k = http.CanonicalHeaderKey(k)
if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
out[k[len(prefix):]] = &v[0]
}
}
r.Set(reflect.ValueOf(out))
}
return nil
}
func unmarshalHeader(v reflect.Value, header string) error {
if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
return nil
}
switch v.Interface().(type) {
case *string:
v.Set(reflect.ValueOf(&header))
case []byte:
b, err := base64.StdEncoding.DecodeString(header)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&b))
case *bool:
b, err := strconv.ParseBool(header)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&b))
case *int64:
i, err := strconv.ParseInt(header, 10, 64)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&i))
case *float64:
f, err := strconv.ParseFloat(header, 64)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&f))
case *time.Time:
t, err := time.Parse(RFC822, header)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&t))
default:
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
return err
}
return nil
}

21
vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go

@ -0,0 +1,21 @@
package protocol
import (
"io"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/request"
)
// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
// UnmarshalDiscardBody is a request handler to empty a response's body and closing it.
func UnmarshalDiscardBody(r *request.Request) {
if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
return
}
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
r.HTTPResponse.Body.Close()
}

293
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go

@ -0,0 +1,293 @@
// Package xmlutil provides XML serialisation of AWS requests and responses.
package xmlutil
import (
"encoding/base64"
"encoding/xml"
"fmt"
"reflect"
"sort"
"strconv"
"time"
"github.com/aws/aws-sdk-go/private/protocol"
)
// BuildXML will serialize params into an xml.Encoder.
// Error will be returned if the serialization of any of the params or nested values fails.
func BuildXML(params interface{}, e *xml.Encoder) error {
b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
root := NewXMLElement(xml.Name{})
if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
return err
}
for _, c := range root.Children {
for _, v := range c {
return StructToXML(e, v, false)
}
}
return nil
}
// Returns the reflection element of a value, if it is a pointer.
func elemOf(value reflect.Value) reflect.Value {
for value.Kind() == reflect.Ptr {
value = value.Elem()
}
return value
}
// An xmlBuilder serializes values from Go code to XML
type xmlBuilder struct {
encoder *xml.Encoder
namespaces map[string]string
}
// buildValue is a generic XMLNode builder for any type. It will build the value for its
// specific type: struct, list, map, or scalar.
//
// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
// type is not provided reflect will be used to determine the value's type.
func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
value = elemOf(value)
if !value.IsValid() { // no need to handle zero values
return nil
} else if tag.Get("location") != "" { // don't handle non-body location values
return nil
}
t := tag.Get("type")
if t == "" {
switch value.Kind() {
case reflect.Struct:
t = "structure"
case reflect.Slice:
t = "list"
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
if field, ok := value.Type().FieldByName("_"); ok {
tag = tag + reflect.StructTag(" ") + field.Tag
}
return b.buildStruct(value, current, tag)
case "list":
return b.buildList(value, current, tag)
case "map":
return b.buildMap(value, current, tag)
default:
return b.buildScalar(value, current, tag)
}
}
// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
// types are converted to XMLNodes also.
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if !value.IsValid() {
return nil
}
fieldAdded := false
// unwrap payloads
if payload := tag.Get("payload"); payload != "" {
field, _ := value.Type().FieldByName(payload)
tag = field.Tag
value = elemOf(value.FieldByName(payload))
if !value.IsValid() {
return nil
}
}
child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
// there is an xmlNamespace associated with this struct
if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
ns := xml.Attr{
Name: xml.Name{Local: "xmlns"},
Value: uri,
}
if prefix != "" {
b.namespaces[prefix] = uri // register the namespace
ns.Name.Local = "xmlns:" + prefix
}
child.Attr = append(child.Attr, ns)
}
t := value.Type()
for i := 0; i < value.NumField(); i++ {
member := elemOf(value.Field(i))
field := t.Field(i)
if field.PkgPath != "" {
continue // ignore unexported fields
}
mTag := field.Tag
if mTag.Get("location") != "" { // skip non-body members
continue
}
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
token := protocol.GetIdempotencyToken()
member = reflect.ValueOf(token)
}
memberName := mTag.Get("locationName")
if memberName == "" {
memberName = field.Name
mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
}
if err := b.buildValue(member, child, mTag); err != nil {
return err
}
fieldAdded = true
}
if fieldAdded { // only append this child if we have one or more valid members
current.AddChild(child)
}
return nil
}
// buildList adds the value's list items to the current XMLNode as children nodes. All
// nested values in the list are converted to XMLNodes also.
func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if value.IsNil() { // don't build omitted lists
return nil
}
// check for unflattened list member
flattened := tag.Get("flattened") != ""
xname := xml.Name{Local: tag.Get("locationName")}
if flattened {
for i := 0; i < value.Len(); i++ {
child := NewXMLElement(xname)
current.AddChild(child)
if err := b.buildValue(value.Index(i), child, ""); err != nil {
return err
}
}
} else {
list := NewXMLElement(xname)
current.AddChild(list)
for i := 0; i < value.Len(); i++ {
iname := tag.Get("locationNameList")
if iname == "" {
iname = "member"
}
child := NewXMLElement(xml.Name{Local: iname})
list.AddChild(child)
if err := b.buildValue(value.Index(i), child, ""); err != nil {
return err
}
}
}
return nil
}
// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
// nested values in the map are converted to XMLNodes also.
//
// Error will be returned if it is unable to build the map's values into XMLNodes
func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if value.IsNil() { // don't build omitted maps
return nil
}
maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
current.AddChild(maproot)
current = maproot
kname, vname := "key", "value"
if n := tag.Get("locationNameKey"); n != "" {
kname = n
}
if n := tag.Get("locationNameValue"); n != "" {
vname = n
}
// sorting is not required for compliance, but it makes testing easier
keys := make([]string, value.Len())
for i, k := range value.MapKeys() {
keys[i] = k.String()
}
sort.Strings(keys)
for _, k := range keys {
v := value.MapIndex(reflect.ValueOf(k))
mapcur := current
if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
child := NewXMLElement(xml.Name{Local: "entry"})
mapcur.AddChild(child)
mapcur = child
}
kchild := NewXMLElement(xml.Name{Local: kname})
kchild.Text = k
vchild := NewXMLElement(xml.Name{Local: vname})
mapcur.AddChild(kchild)
mapcur.AddChild(vchild)
if err := b.buildValue(v, vchild, ""); err != nil {
return err
}
}
return nil
}
// buildScalar will convert the value into a string and append it as an attribute or child
// of the current XMLNode.
//
// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
//
// Error will be returned if the value type is unsupported.
func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
var str string
switch converted := value.Interface().(type) {
case string:
str = converted
case []byte:
if !value.IsNil() {
str = base64.StdEncoding.EncodeToString(converted)
}
case bool:
str = strconv.FormatBool(converted)
case int64:
str = strconv.FormatInt(converted, 10)
case int:
str = strconv.Itoa(converted)
case float64:
str = strconv.FormatFloat(converted, 'f', -1, 64)
case float32:
str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
str = converted.UTC().Format(ISO8601UTC)
default:
return fmt.Errorf("unsupported value for param %s: %v (%s)",
tag.Get("locationName"), value.Interface(), value.Type().Name())
}
xname := xml.Name{Local: tag.Get("locationName")}
if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
attr := xml.Attr{Name: xname, Value: str}
current.Attr = append(current.Attr, attr)
} else { // regular text node
current.AddChild(&XMLNode{Name: xname, Text: str})
}
return nil
}
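
BuildXML drives the struct tags shown above. The sketch below builds a tiny invented shape into XML, assuming the vendored xmlutil package is importable; real shapes are generated, so the exampleGrant type and its tags are illustrative only.

// Illustrative sketch: serializing a tagged struct through BuildXML.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type exampleGrant struct {
	_          struct{} `type:"structure" locationName:"Grant"` // invented shape
	Permission *string  `locationName:"Permission" type:"string"`
}

func main() {
	perm := "READ"
	var buf bytes.Buffer
	enc := xml.NewEncoder(&buf)
	if err := xmlutil.BuildXML(&exampleGrant{Permission: &perm}, enc); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <Grant><Permission>READ</Permission></Grant>
}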

260
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go

@ -0,0 +1,260 @@
package xmlutil
import (
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"reflect"
"strconv"
"strings"
"time"
)
// UnmarshalXML deserializes an xml.Decoder into the container v. The
// container needs to match the shape of the XML expected to be decoded.
// If the shape doesn't match, unmarshaling will fail.
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
n, _ := XMLToStruct(d, nil)
if n.Children != nil {
for _, root := range n.Children {
for _, c := range root {
if wrappedChild, ok := c.Children[wrapper]; ok {
c = wrappedChild[0] // pull out wrapped element
}
err := parse(reflect.ValueOf(v), c, "")
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
return nil
}
return nil
}
// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
// will be used to determine the type from r.
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
rtype := r.Type()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem() // check kind of actual element type
}
t := tag.Get("type")
if t == "" {
switch rtype.Kind() {
case reflect.Struct:
t = "structure"
case reflect.Slice:
t = "list"
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
if field, ok := rtype.FieldByName("_"); ok {
tag = field.Tag
}
return parseStruct(r, node, tag)
case "list":
return parseList(r, node, tag)
case "map":
return parseMap(r, node, tag)
default:
return parseScalar(r, node, tag)
}
}
// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
// types in the structure will also be deserialized.
func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
t := r.Type()
if r.Kind() == reflect.Ptr {
if r.IsNil() { // create the structure if it's nil
s := reflect.New(r.Type().Elem())
r.Set(s)
r = s
}
r = r.Elem()
t = t.Elem()
}
// unwrap any payloads
if payload := tag.Get("payload"); payload != "" {
field, _ := t.FieldByName(payload)
return parseStruct(r.FieldByName(payload), node, field.Tag)
}
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if c := field.Name[0:1]; strings.ToLower(c) == c {
continue // ignore unexported fields
}
// figure out what this field is called
name := field.Name
if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
name = field.Tag.Get("locationNameList")
} else if locName := field.Tag.Get("locationName"); locName != "" {
name = locName
}
// try to find the field by name in elements
elems := node.Children[name]
if elems == nil { // try to find the field in attributes
for _, a := range node.Attr {
if name == a.Name.Local {
// turn this into a text node for de-serializing
elems = []*XMLNode{{Text: a.Value}}
}
}
}
member := r.FieldByName(field.Name)
for _, elem := range elems {
err := parse(member, elem, field.Tag)
if err != nil {
return err
}
}
}
return nil
}
// parseList deserializes a list of values from an XML node. Each list entry
// will also be deserialized.
func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
t := r.Type()
if tag.Get("flattened") == "" { // look at all item entries
mname := "member"
if name := tag.Get("locationNameList"); name != "" {
mname = name
}
if Children, ok := node.Children[mname]; ok {
if r.IsNil() {
r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
}
for i, c := range Children {
err := parse(r.Index(i), c, "")
if err != nil {
return err
}
}
}
} else { // flattened list means this is a single element
if r.IsNil() {
r.Set(reflect.MakeSlice(t, 0, 0))
}
childR := reflect.Zero(t.Elem())
r.Set(reflect.Append(r, childR))
err := parse(r.Index(r.Len()-1), node, "")
if err != nil {
return err
}
}
return nil
}
// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
// will also be deserialized as map entries.
func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
if r.IsNil() {
r.Set(reflect.MakeMap(r.Type()))
}
if tag.Get("flattened") == "" { // look at all child entries
for _, entry := range node.Children["entry"] {
parseMapEntry(r, entry, tag)
}
} else { // this element is itself an entry
parseMapEntry(r, node, tag)
}
return nil
}
// parseMapEntry deserializes a map entry from a XML node.
func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
kname, vname := "key", "value"
if n := tag.Get("locationNameKey"); n != "" {
kname = n
}
if n := tag.Get("locationNameValue"); n != "" {
vname = n
}
keys, ok := node.Children[kname]
values := node.Children[vname]
if ok {
for i, key := range keys {
keyR := reflect.ValueOf(key.Text)
value := values[i]
valueR := reflect.New(r.Type().Elem()).Elem()
parse(valueR, value, "")
r.SetMapIndex(keyR, valueR)
}
}
return nil
}
// parseScalar deserializes an XMLNode value into a concrete type based on the
// interface type of r.
//
// Error is returned if the deserialization fails due to invalid type conversion,
// or unsupported interface type.
func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
switch r.Interface().(type) {
case *string:
r.Set(reflect.ValueOf(&node.Text))
return nil
case []byte:
b, err := base64.StdEncoding.DecodeString(node.Text)
if err != nil {
return err
}
r.Set(reflect.ValueOf(b))
case *bool:
v, err := strconv.ParseBool(node.Text)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&v))
case *int64:
v, err := strconv.ParseInt(node.Text, 10, 64)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&v))
case *float64:
v, err := strconv.ParseFloat(node.Text, 64)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&v))
case *time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
t, err := time.Parse(ISO8601UTC, node.Text)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&t))
default:
return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
}
return nil
}
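
UnmarshalXML is what query.Unmarshal (earlier in this diff) calls with OperationName+"Result" as the wrapper. The sketch below decodes an invented wrapped document the same way; the exampleOutput type, element names, and values are made up, and it assumes the vendored xmlutil package is importable.

// Illustrative sketch: unwrapping a Query-style "...Result" element during decoding.
package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type exampleOutput struct {
	Name *string `locationName:"Name" type:"string"`
}

func main() {
	body := `<ExampleResponse>
  <ExampleResult><Name>demo</Name></ExampleResult>
  <ResponseMetadata><RequestId>abc-123</RequestId></ResponseMetadata>
</ExampleResponse>`
	out := &exampleOutput{}
	d := xml.NewDecoder(strings.NewReader(body))
	// "ExampleResult" plays the role of r.Operation.Name+"Result" in query.Unmarshal.
	if err := xmlutil.UnmarshalXML(out, d, "ExampleResult"); err != nil {
		panic(err)
	}
	fmt.Println(*out.Name) // demo
}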

105
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go

@ -0,0 +1,105 @@
package xmlutil
import (
"encoding/xml"
"io"
"sort"
)
// A XMLNode contains the values to be encoded or decoded.
type XMLNode struct {
Name xml.Name `json:",omitempty"`
Children map[string][]*XMLNode `json:",omitempty"`
Text string `json:",omitempty"`
Attr []xml.Attr `json:",omitempty"`
}
// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
func NewXMLElement(name xml.Name) *XMLNode {
return &XMLNode{
Name: name,
Children: map[string][]*XMLNode{},
Attr: []xml.Attr{},
}
}
// AddChild adds child to the XMLNode.
func (n *XMLNode) AddChild(child *XMLNode) {
if _, ok := n.Children[child.Name.Local]; !ok {
n.Children[child.Name.Local] = []*XMLNode{}
}
n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
}
// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values.
func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
out := &XMLNode{}
for {
tok, err := d.Token()
if tok == nil || err == io.EOF {
break
}
if err != nil {
return out, err
}
switch typed := tok.(type) {
case xml.CharData:
out.Text = string(typed.Copy())
case xml.StartElement:
el := typed.Copy()
out.Attr = el.Attr
if out.Children == nil {
out.Children = map[string][]*XMLNode{}
}
name := typed.Name.Local
slice := out.Children[name]
if slice == nil {
slice = []*XMLNode{}
}
node, e := XMLToStruct(d, &el)
if e != nil {
return out, e
}
node.Name = typed.Name
slice = append(slice, node)
out.Children[name] = slice
case xml.EndElement:
if s != nil && s.Name.Local == typed.Name.Local { // matching end token
return out, nil
}
}
}
return out, nil
}
// StructToXML writes an XMLNode to a xml.Encoder as tokens.
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
if node.Text != "" {
e.EncodeToken(xml.CharData([]byte(node.Text)))
} else if sorted {
sortedNames := []string{}
for k := range node.Children {
sortedNames = append(sortedNames, k)
}
sort.Strings(sortedNames)
for _, k := range sortedNames {
for _, v := range node.Children[k] {
StructToXML(e, v, sorted)
}
}
} else {
for _, c := range node.Children {
for _, v := range c {
StructToXML(e, v, sorted)
}
}
}
e.EncodeToken(xml.EndElement{Name: node.Name})
return e.Flush()
}

82
vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go

@ -0,0 +1,82 @@
package v4
import (
"net/http"
"strings"
)
// rules houses a set of rules needed for validation of a
// string value
type rules []rule
// rule interface allows for more flexible rules and simply
// checks whether or not a value adheres to that rule
type rule interface {
IsValid(value string) bool
}
// IsValid will iterate through all rules and report whether any rule
// applies to the value; nested rules are supported
func (r rules) IsValid(value string) bool {
for _, rule := range r {
if rule.IsValid(value) {
return true
}
}
return false
}
// mapRule generic rule for maps
type mapRule map[string]struct{}
// IsValid for the map rule reports whether the value exists in the map
func (m mapRule) IsValid(value string) bool {
_, ok := m[value]
return ok
}
// whitelist is a generic rule for whitelisting
type whitelist struct {
rule
}
// IsValid for whitelist checks if the value is within the whitelist
func (w whitelist) IsValid(value string) bool {
return w.rule.IsValid(value)
}
// blacklist is a generic rule for blacklisting
type blacklist struct {
rule
}
// IsValid for blacklist checks that the value is not within the blacklist
func (b blacklist) IsValid(value string) bool {
return !b.rule.IsValid(value)
}
type patterns []string
// IsValid for patterns checks each pattern and returns if a match has
// been found
func (p patterns) IsValid(value string) bool {
for _, pattern := range p {
if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
return true
}
}
return false
}
// inclusiveRules allows rules to depend on one another
type inclusiveRules []rule
// IsValid will return true if all rules are true
func (r inclusiveRules) IsValid(value string) bool {
for _, rule := range r {
if !rule.IsValid(value) {
return false
}
}
return true
}

465
vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go

@ -0,0 +1,465 @@
// Package v4 implements the AWS Signature Version 4 signer
package v4
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/rest"
)
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
)
var ignoredHeaders = rules{
blacklist{
mapRule{
"Authorization": struct{}{},
"User-Agent": struct{}{},
},
},
}
// requiredSignedHeaders is a whitelist for building canonical headers.
var requiredSignedHeaders = rules{
whitelist{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
"X-Amz-Grant-Write": struct{}{},
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
},
},
patterns{"X-Amz-Meta-"},
}
// allowedQueryHoisting is a whitelist of headers that may be hoisted into the
// query string when presigning a request.
var allowedQueryHoisting = inclusiveRules{
blacklist{requiredSignedHeaders},
patterns{"X-Amz-"},
}
type signer struct {
Request *http.Request
Time time.Time
ExpireTime time.Duration
ServiceName string
Region string
CredValues credentials.Value
Credentials *credentials.Credentials
Query url.Values
Body io.ReadSeeker
Debug aws.LogLevelType
Logger aws.Logger
isPresign bool
formattedTime string
formattedShortTime string
signedHeaders string
canonicalHeaders string
canonicalString string
credentialString string
stringToSign string
signature string
authorization string
notHoist bool
signedHeaderVals http.Header
}
// Sign requests with signature version 4.
//
// Will sign the requests with the service config's Credentials object.
// Signing is skipped if the credentials object is credentials.AnonymousCredentials.
func Sign(req *request.Request) {
// Skip signing entirely when the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
region := req.ClientInfo.SigningRegion
if region == "" {
region = aws.StringValue(req.Config.Region)
}
name := req.ClientInfo.SigningName
if name == "" {
name = req.ClientInfo.ServiceName
}
s := signer{
Request: req.HTTPRequest,
Time: req.Time,
ExpireTime: req.ExpireTime,
Query: req.HTTPRequest.URL.Query(),
Body: req.Body,
ServiceName: name,
Region: region,
Credentials: req.Config.Credentials,
Debug: req.Config.LogLevel.Value(),
Logger: req.Config.Logger,
notHoist: req.NotHoist,
}
req.Error = s.sign()
req.Time = s.Time
req.SignedHeaderVals = s.signedHeaderVals
}
func (v4 *signer) sign() error {
if v4.ExpireTime != 0 {
v4.isPresign = true
}
if v4.isRequestSigned() {
if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) {
// If the request is already signed, and the credentials have not
// expired, and the request is not too old ignore the signing request.
return nil
}
v4.Time = time.Now()
// The credentials have expired for this request. The current signature
// is invalid and needs to be regenerated, otherwise the request will fail.
if v4.isPresign {
v4.removePresign()
// Update the request's query string to ensure the values stay in
// sync in case retrieving the new credentials fails.
v4.Request.URL.RawQuery = v4.Query.Encode()
}
}
var err error
v4.CredValues, err = v4.Credentials.Get()
if err != nil {
return err
}
if v4.isPresign {
v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
if v4.CredValues.SessionToken != "" {
v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
} else {
v4.Query.Del("X-Amz-Security-Token")
}
} else if v4.CredValues.SessionToken != "" {
v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
}
v4.build()
if v4.Debug.Matches(aws.LogDebugWithSigning) {
v4.logSigningInfo()
}
return nil
}
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
func (v4 *signer) logSigningInfo() {
signedURLMsg := ""
if v4.isPresign {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
}
msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
v4.Logger.Log(msg)
}
func (v4 *signer) build() {
v4.buildTime() // no depends
v4.buildCredentialString() // no depends
unsignedHeaders := v4.Request.Header
if v4.isPresign {
if !v4.notHoist {
urlValues := url.Values{}
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
for k := range urlValues {
v4.Query[k] = urlValues[k]
}
}
}
v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
v4.buildCanonicalString() // depends on canon headers / signed headers
v4.buildStringToSign() // depends on canon string
v4.buildSignature() // depends on string to sign
if v4.isPresign {
v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
} else {
parts := []string{
authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
"SignedHeaders=" + v4.signedHeaders,
"Signature=" + v4.signature,
}
v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
}
}
func (v4 *signer) buildTime() {
v4.formattedTime = v4.Time.UTC().Format(timeFormat)
v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
if v4.isPresign {
duration := int64(v4.ExpireTime / time.Second)
v4.Query.Set("X-Amz-Date", v4.formattedTime)
v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
} else {
v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
}
}
func (v4 *signer) buildCredentialString() {
v4.credentialString = strings.Join([]string{
v4.formattedShortTime,
v4.Region,
v4.ServiceName,
"aws4_request",
}, "/")
if v4.isPresign {
v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
}
}
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
for k, h := range header {
if r.IsValid(k) {
query[k] = h
} else {
unsignedHeaders[k] = h
}
}
return query, unsignedHeaders
}
func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) {
var headers []string
headers = append(headers, "host")
for k, v := range header {
canonicalKey := http.CanonicalHeaderKey(k)
if !r.IsValid(canonicalKey) {
continue // ignored header
}
if v4.signedHeaderVals == nil {
v4.signedHeaderVals = make(http.Header)
}
lowerCaseKey := strings.ToLower(k)
if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok {
// include additional values
v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...)
continue
}
headers = append(headers, lowerCaseKey)
v4.signedHeaderVals[lowerCaseKey] = v
}
sort.Strings(headers)
v4.signedHeaders = strings.Join(headers, ";")
if v4.isPresign {
v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
}
headerValues := make([]string, len(headers))
for i, k := range headers {
if k == "host" {
headerValues[i] = "host:" + v4.Request.URL.Host
} else {
headerValues[i] = k + ":" +
strings.Join(v4.signedHeaderVals[k], ",")
}
}
v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
}
func (v4 *signer) buildCanonicalString() {
v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
uri := v4.Request.URL.Opaque
if uri != "" {
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
} else {
uri = v4.Request.URL.Path
}
if uri == "" {
uri = "/"
}
if v4.ServiceName != "s3" {
uri = rest.EscapePath(uri, false)
}
v4.canonicalString = strings.Join([]string{
v4.Request.Method,
uri,
v4.Request.URL.RawQuery,
v4.canonicalHeaders + "\n",
v4.signedHeaders,
v4.bodyDigest(),
}, "\n")
}
func (v4 *signer) buildStringToSign() {
v4.stringToSign = strings.Join([]string{
authHeaderPrefix,
v4.formattedTime,
v4.credentialString,
hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
}, "\n")
}
func (v4 *signer) buildSignature() {
secret := v4.CredValues.SecretAccessKey
date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
region := makeHmac(date, []byte(v4.Region))
service := makeHmac(region, []byte(v4.ServiceName))
credentials := makeHmac(service, []byte("aws4_request"))
signature := makeHmac(credentials, []byte(v4.stringToSign))
v4.signature = hex.EncodeToString(signature)
}
func (v4 *signer) bodyDigest() string {
hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
if v4.isPresign && v4.ServiceName == "s3" {
hash = "UNSIGNED-PAYLOAD"
} else if v4.Body == nil {
hash = hex.EncodeToString(makeSha256([]byte{}))
} else {
hash = hex.EncodeToString(makeSha256Reader(v4.Body))
}
v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
}
return hash
}
// isRequestSigned returns if the request is currently signed or presigned
func (v4 *signer) isRequestSigned() bool {
if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
return true
}
if v4.Request.Header.Get("Authorization") != "" {
return true
}
return false
}
// removePresign removes the presign query string parameters so the request can be signed again.
func (v4 *signer) removePresign() {
v4.Query.Del("X-Amz-Algorithm")
v4.Query.Del("X-Amz-Signature")
v4.Query.Del("X-Amz-Security-Token")
v4.Query.Del("X-Amz-Date")
v4.Query.Del("X-Amz-Expires")
v4.Query.Del("X-Amz-Credential")
v4.Query.Del("X-Amz-SignedHeaders")
}
func makeHmac(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256Reader(reader io.ReadSeeker) []byte {
hash := sha256.New()
start, _ := reader.Seek(0, 1)
defer reader.Seek(start, 0)
io.Copy(hash, reader)
return hash.Sum(nil)
}
func stripExcessSpaces(headerVals []string) []string {
vals := make([]string, len(headerVals))
for i, str := range headerVals {
stripped := ""
found := false
str = strings.TrimSpace(str)
for _, c := range str {
if !found && c == ' ' {
stripped += string(c)
found = true
} else if c != ' ' {
stripped += string(c)
found = false
}
}
vals[i] = stripped
}
return vals
}
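
buildSignature above derives the signing key through a chain of HMAC-SHA256 operations over the short date, region, service name, and the fixed "aws4_request" string. The standalone sketch below reproduces just that derivation with invented inputs; it is not a full SigV4 signer.

// Illustrative sketch of the Signature Version 4 key-derivation chain.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLEKEY" // example value, not a real credential
	date := hmacSHA256([]byte("AWS4"+secret), []byte("20160101"))
	region := hmacSHA256(date, []byte("us-east-1"))
	service := hmacSHA256(region, []byte("sts"))
	signingKey := hmacSHA256(service, []byte("aws4_request"))
	signature := hmacSHA256(signingKey, []byte("example-string-to-sign"))
	fmt.Println(hex.EncodeToString(signature))
}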

7
vendor/github.com/aws/aws-sdk-go/sdk.go

@ -0,0 +1,7 @@
// Package sdk is the official AWS SDK for the Go programming language.
//
// See our Developer Guide for information on getting started and using
// the SDK.
//
// https://github.com/aws/aws-sdk-go/wiki
package sdk

5
vendor/github.com/aws/aws-sdk-go/service/generate.go

@ -0,0 +1,5 @@
// Package service contains automatically generated AWS clients.
package service
//go:generate go run ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json
//go:generate gofmt -s -w ../service

1478
vendor/github.com/aws/aws-sdk-go/service/sts/api.go

File diff suppressed because it is too large

12
vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go

@ -0,0 +1,12 @@
package sts
import "github.com/aws/aws-sdk-go/aws/request"
func init() {
initRequest = func(r *request.Request) {
switch r.Operation.Name {
case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
r.Handlers.Sign.Clear() // these operations are unsigned
}
}
}

130
vendor/github.com/aws/aws-sdk-go/service/sts/service.go

@ -0,0 +1,130 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package sts
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/query"
"github.com/aws/aws-sdk-go/private/signer/v4"
)
// The AWS Security Token Service (STS) is a web service that enables you to
// request temporary, limited-privilege credentials for AWS Identity and Access
// Management (IAM) users or for users that you authenticate (federated users).
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// As an alternative to using the API, you can use one of the AWS SDKs, which
// consist of libraries and sample code for various programming languages and
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
// way to create programmatic access to STS. For example, the SDKs take care
// of cryptographically signing requests, managing errors, and retrying requests
// automatically. For information about the AWS SDKs, including how to download
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
//
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API,
// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in Using IAM. For information about using security tokens with other AWS
// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// in the IAM User Guide.
//
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
// (http://aws.amazon.com/documentation/).
//
// Endpoints
//
// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
// that maps to the US East (N. Virginia) region. Additional regions are available
// and are activated by default. For more information, see Activating and Deactivating
// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
// in the AWS General Reference.
//
// Recording API requests
//
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using
// information collected by CloudTrail, you can determine what requests were
// successfully made to STS, who made the request, when it was made, and so
// on. To learn more about CloudTrail, including how to turn it on and find
// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type STS struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// A ServiceName is the name of the service the client will make API calls to.
const ServiceName = "sts"
// New creates a new instance of the STS client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a STS client from just a session.
// svc := sts.New(mySession)
//
// // Create a STS client with additional configuration
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(ServiceName, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
svc := &STS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2011-06-15",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBackNamed(query.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a STS operation and runs any
// custom request initialization.
func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
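// Usage sketch (assumptions: the vendored aws/session package is available and
// credentials are configured via the environment or the shared credentials file):
//
//	sess := session.New(aws.NewConfig().WithRegion("us-east-1"))
//	svc := sts.New(sess)
//	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
//		DurationSeconds: aws.Int64(900),
//	})
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(*out.Credentials.AccessKeyId, *out.Credentials.Expiration)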

202
vendor/github.com/drone/drone-plugin-go/LICENSE

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

103
vendor/github.com/drone/drone-plugin-go/README.md

@ -0,0 +1,103 @@
drone-plugin-go
===============
This is a package with simple support for writing Drone plugins in Go.
## Overview
Plugins are executable files run by Drone to customize the build lifecycle. Plugins receive input data from `stdin` (or `arg[1]`) and write the results to `stdout`:
```sh
./slack-plugin <<EOF
{
"repo" : {
"owner": "foo",
"name": "bar",
"full_name": "foo/bar"
},
"build" : {
"number": 1
"status": "success",
"started_at": 1421029603,
"finished_at": 1421029813,
"head_commit" : {
"sha": "9f2849d5",
"ref": "refs/heads/master"
"branch": "master",
"message": "Update the Readme",
"author": {
"login": "johnsmith"
"email": "john.smith@gmail.com",
}
}
},
"job" : {
"number": 1,
"status": "success",
"started_at": 1421029603,
"finished_at": 1421029813,
"exit_code": 0,
"environment": { "GO_VERSION": "1.4" }
},
"clone" : {
"branch": "master",
"remote": "git://github.com/drone/drone",
"dir": "/drone/src/github.com/drone/drone",
"ref": "refs/heads/master",
"sha": "436b7a6e2abaddfd35740527353e78a227ddcb2c"
},
"vargs": {
"webhook_url": "https://hooks.slack.com/services/...",
"username": "drone",
"channel": "#dev"
}
}
EOF
```
Use this `plugin` package to retrieve and parse input parameters:
```Go
var repo = plugin.Repo{}
var build = plugin.Build{}
var slack = struct {
URL string `json:"webhook_url"`
Username string `json:"username"`
Channel string `json:"channel"`
}{}
plugin.Param("repo", &repo)
plugin.Param("build", &build)
plugin.Param("vargs", &slack)
plugin.Parse()
```
Note that your plugin configuration data (declared in the `.drone.yml` file) will be provided in the `vargs` section of the JSON input.
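Putting it together, a minimal plugin entrypoint might look like the sketch below (the `vargs` struct reuses the hypothetical Slack settings from the example above; a real plugin would post to the webhook instead of printing):
```Go
package main

import (
	"fmt"

	"github.com/drone/drone-plugin-go/plugin"
)

func main() {
	var (
		repo  = plugin.Repo{}
		build = plugin.Build{}
		vargs = struct {
			URL      string `json:"webhook_url"`
			Username string `json:"username"`
			Channel  string `json:"channel"`
		}{}
	)

	plugin.Param("repo", &repo)
	plugin.Param("build", &build)
	plugin.Param("vargs", &vargs)
	plugin.MustParse()

	// a real plugin would send a message to vargs.URL here
	fmt.Printf("%s build #%d finished with status %s\n",
		repo.FullName, build.Number, build.Status)
}
```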
### Shared Volumes
The repository clone directory (specified in the `clone.dir` input parameter) will be shared across all plugins as a [container volume](https://docs.docker.com/userguide/dockervolumes/#creating-and-mounting-a-data-volume-container). This means that any files in your repository directory or subdirectories are accessible to plugins. This is useful for plugins that analyze or archive files, such as an S3 plugin.
### Publishing
Drone plugins are distributed as Docker images. We therefore recommend publishing your plugins to [Docker Hub](https://index.docker.io).
The `ENTRYPOINT` must be defined and must point to your executable file. The `CMD` section will be overridden by Drone and will be used to send the JSON encoded data in `arg[1]`. An example Dockerfile for your plugin might look like this:
```Dockerfile
# Docker image for Drone's git-clone plugin
#
# docker build -t drone/drone-clone-git .
FROM library/golang:1.4
# copy the local src files to the container's workspace.
ADD . /go/src/github.com/drone/drone-clone-git/
# build the git-clone plugin inside the container.
RUN go get github.com/drone/drone-clone-git/... && \
go install github.com/drone/drone-clone-git
# run the git-clone plugin when the container starts
ENTRYPOINT ["/go/bin/drone-clone-git"]
```

17
vendor/github.com/drone/drone-plugin-go/plugin/const.go

@ -0,0 +1,17 @@
package plugin
const (
StatePending = "pending"
StateRunning = "running"
StateSuccess = "success"
StateFailure = "failure"
StateKilled = "killed"
StateError = "error"
)
const (
EventPush = "push"
EventPull = "pull_request"
EventTag = "tag"
EventDeploy = "deploy"
)
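// Usage sketch: these constants mirror the status and event strings sent by
// the Drone server. For example, assuming `build` is a Build decoded from the
// input JSON, a plugin might pick a message like this:
//
//	var msg string
//	switch build.Status {
//	case StateSuccess:
//		msg = "build succeeded"
//	case StateFailure, StateError, StateKilled:
//		msg = "build failed"
//	default:
//		msg = "build " + build.Status
//	}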

131
vendor/github.com/drone/drone-plugin-go/plugin/param.go

@ -0,0 +1,131 @@
package plugin
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
)
var Stdin *ParamSet
func init() {
// defaults to stdin
Stdin = NewParamSet(os.Stdin)
// check for params after the double dash
// in the command string
for i, argv := range os.Args {
if argv == "--" {
arg := os.Args[i+1]
buf := bytes.NewBufferString(arg)
Stdin = NewParamSet(buf)
break
}
}
}
// this init function is deprecated, but I'm keeping it
// around just in case it proves useful in the future.
func deprecated_init() {
// if piping from stdin we can just exit
// and use the default Stdin value
stat, _ := os.Stdin.Stat()
if (stat.Mode() & os.ModeCharDevice) == 0 {
return
}
// check for params after the double dash
// in the command string
for i, argv := range os.Args {
if argv == "--" {
arg := os.Args[i+1]
buf := bytes.NewBufferString(arg)
Stdin = NewParamSet(buf)
return
}
}
// else use the first variable in the list
if len(os.Args) > 1 {
buf := bytes.NewBufferString(os.Args[1])
Stdin = NewParamSet(buf)
}
}
type ParamSet struct {
reader io.Reader
params map[string]interface{}
}
func NewParamSet(reader io.Reader) *ParamSet {
var p = new(ParamSet)
p.reader = reader
p.params = map[string]interface{}{}
return p
}
// Param defines a parameter with the specified name.
func (p ParamSet) Param(name string, value interface{}) {
p.params[name] = value
}
// Parse parses parameter definitions from the map.
func (p ParamSet) Parse() error {
raw := map[string]json.RawMessage{}
err := json.NewDecoder(p.reader).Decode(&raw)
if err != nil {
return err
}
for key, val := range p.params {
data, ok := raw[key]
if !ok {
continue
}
err := json.Unmarshal(data, val)
if err != nil {
return fmt.Errorf("Unable to unarmshal %s. %s", key, err)
}
}
return nil
}
// Unmarshal parses the JSON payload from the command
// arguments and unmarshals it into the value pointed to by v.
func (p ParamSet) Unmarshal(v interface{}) error {
return json.NewDecoder(p.reader).Decode(v)
}
// Param defines a parameter with the specified name.
func Param(name string, value interface{}) {
Stdin.Param(name, value)
}
// Parse parses parameter definitions from the map.
func Parse() error {
return Stdin.Parse()
}
// Unmarshal parses the JSON payload from the command
// arguments and unmarshals it into the value pointed to by v.
func Unmarshal(v interface{}) error {
return Stdin.Unmarshal(v)
}
// MustUnmarshal parses the JSON payload from the command
// arguments and unmarshals it into the value pointed to by v.
func MustUnmarshal(v interface{}) error {
return Stdin.Unmarshal(v)
}
// MustParse parses parameter definitions from the map
// and panics if there is a parsing error.
func MustParse() {
err := Parse()
if err != nil {
panic(err)
}
}
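// Usage sketch (assumptions: a strings.Reader stands in for stdin and the JSON
// shape is illustrative only):
//
//	set := NewParamSet(strings.NewReader(`{"vargs": {"channel": "#dev"}}`))
//	vargs := struct {
//		Channel string `json:"channel"`
//	}{}
//	set.Param("vargs", &vargs)
//	if err := set.Parse(); err != nil {
//		// handle the error
//	}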

97
vendor/github.com/drone/drone-plugin-go/plugin/types.go

@ -0,0 +1,97 @@
package plugin
// Repo represents a version control repository.
type Repo struct {
Kind string `json:"scm"`
Owner string `json:"owner"`
Name string `json:"name"`
FullName string `json:"full_name"`
Avatar string `json:"avatar_url"`
Link string `json:"link_url"`
Clone string `json:"clone_url"`
Branch string `json:"default_branch"`
Timeout int64 `json:"timeout"`
IsPrivate bool `json:"private"`
IsTrusted bool `json:"trusted"`
AllowPull bool `json:"allow_pr"`
AllowPush bool `json:"allow_push"`
AllowDeploy bool `json:"allow_deploys"`
AllowTag bool `json:"allow_tags"`
}
// System provides important information about the Drone
// server to the plugin.
type System struct {
Version string `json:"version"`
Link string `json:"link_url"`
Plugins []string `json:"plugins"`
Globals []string `json:"globals"`
Escalates []string `json:"privileged_plugins"`
}
// Workspace defines the build's workspace inside the
// container. This helps the plugin locate the source
// code directory.
type Workspace struct {
Root string `json:"root"`
Path string `json:"path"`
Netrc *Netrc `json:"netrc"`
Keys *Keypair `json:"keys"`
}
// Keypair represents an RSA public and private key assigned to a
// repository. It may be used to clone private repositories, or as
// a deployment key.
type Keypair struct {
Public string `json:"public"`
Private string `json:"private"`
}
// Netrc defines a default .netrc file that should be injected
// into the build environment. It will be used to authorize access
// to https resources, such as git+https clones.
type Netrc struct {
Machine string `json:"machine"`
Login string `json:"login"`
Password string `json:"user"`
}
// Build represents the process of compiling and testing a changeset,
// typically triggered by the remote system (ie GitHub).
type Build struct {
Number int `json:"number"`
Event string `json:"event"`
Status string `json:"status"`
Enqueued int64 `json:"enqueued_at"`
Created int64 `json:"created_at"`
Started int64 `json:"started_at"`
Finished int64 `json:"finished_at"`
Deploy string `json:"deploy_to"`
Commit string `json:"commit"`
Branch string `json:"branch"`
Ref string `json:"ref"`
Refspec string `json:"refspec"`
Remote string `json:"remote"`
Title string `json:"title"`
Message string `json:"message"`
Timestamp int64 `json:"timestamp"`
Author string `json:"author"`
Avatar string `json:"author_avatar"`
Email string `json:"author_email"`
Link string `json:"link_url"`
}
// Job represents a single job that is being executed as part
// of a Build.
type Job struct {
ID int64 `json:"id"`
Number int `json:"number"`
Status string `json:"status"`
ExitCode int `json:"exit_code"`
Enqueued int64 `json:"enqueued_at"`
Started int64 `json:"started_at"`
Finished int64 `json:"finished_at"`
Environment map[string]string `json:"environment"`
}
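// Decoding sketch (standard library only; the payload below is illustrative):
//
//	var build Build
//	payload := []byte(`{"number": 1, "status": "success", "author": "johnsmith"}`)
//	if err := json.Unmarshal(payload, &build); err != nil {
//		// handle the error
//	}
//	fmt.Println(build.Number, build.Status, build.Author)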

191
vendor/github.com/go-ini/ini/LICENSE

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

12
vendor/github.com/go-ini/ini/Makefile

@ -0,0 +1,12 @@
.PHONY: build test bench vet
build: vet bench
test:
	go test -v -cover -race
bench:
	go test -v -cover -race -test.bench=. -test.benchmem
vet:
	go vet

638
vendor/github.com/go-ini/ini/README.md

@ -0,0 +1,638 @@
ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini)
===
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
Package ini provides INI file read and write functionality in Go.
[简体中文](README_ZH.md)
## Feature
- Load multiple data sources (`[]byte` or file) with overwrites.
- Read with recursive values.
- Read with parent-child sections.
- Read with auto-increment key names.
- Read with multiple-line values.
- Read with tons of helper methods.
- Read and convert values to Go types.
- Read and **WRITE** comments of sections and keys.
- Manipulate sections, keys and comments with ease.
- Keep sections and keys in order as you parse and save.
## Installation
To use a tagged revision:
go get gopkg.in/ini.v1
To use with latest changes:
go get github.com/go-ini/ini
Please add `-u` flag to update in the future.
### Testing
If you want to run the tests on your machine, please add the `-t` flag:
go get -t gopkg.in/ini.v1
Please add `-u` flag to update in the future.
## Getting Started
### Loading from data sources
A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many** data sources **as** you want. Passing any other type will simply return an error.
```go
cfg, err := ini.Load([]byte("raw data"), "filename")
```
Or start with an empty object:
```go
cfg := ini.Empty()
```
If you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
```go
err := cfg.Append("other file", []byte("other raw data"))
```
If you have a list of files and some of them may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
```go
cfg, err := ini.LooseLoad("filename", "filename_404")
```
The cool thing is, whenever a missing file becomes available, the next call to the `Reload` method will pick it up as usual.
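For example, assuming `cfg` was created with `LooseLoad` as above:
```go
// pick up any files that have appeared since the initial load
if err := cfg.Reload(); err != nil {
	// handle error
}
```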
### Working with sections
To get a section, you would need to:
```go
section, err := cfg.GetSection("section name")
```
As a shortcut for the default section, just give an empty string as the name:
```go
section, err := cfg.GetSection("")
```
When you're pretty sure the section exists, the following code could make your life easier:
```go
section := cfg.Section("")
```
What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
To create a new section:
```go
err := cfg.NewSection("new section")
```
To get a list of sections or section names:
```go
sections := cfg.Sections()
names := cfg.SectionStrings()
```
### Working with keys
To get a key under a section:
```go
key, err := cfg.Section("").GetKey("key name")
```
Same rule applies to key operations:
```go
key := cfg.Section("").Key("key name")
```
To check if a key exists:
```go
yes := cfg.Section("").HasKey("key name")
```
To create a new key:
```go
err := cfg.Section("").NewKey("name", "value")
```
To get a list of keys or key names:
```go
keys := cfg.Section("").Keys()
names := cfg.Section("").KeyStrings()
```
To get a clone hash of keys and corresponding values:
```go
hash := cfg.Section("").KeysHash()
```
### Working with values
To get a string value:
```go
val := cfg.Section("").Key("key name").String()
```
To validate key value on the fly:
```go
val := cfg.Section("").Key("key name").Validate(func(in string) string {
if len(in) == 0 {
return "default"
}
return in
})
```
If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance):
```go
val := cfg.Section("").Key("key name").Value()
```
To check if raw value exists:
```go
yes := cfg.Section("").HasValue("test value")
```
To get value with types:
```go
// For boolean values:
// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
v, err = cfg.Section("").Key("BOOL").Bool()
v, err = cfg.Section("").Key("FLOAT64").Float64()
v, err = cfg.Section("").Key("INT").Int()
v, err = cfg.Section("").Key("INT64").Int64()
v, err = cfg.Section("").Key("UINT").Uint()
v, err = cfg.Section("").Key("UINT64").Uint64()
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
v = cfg.Section("").Key("BOOL").MustBool()
v = cfg.Section("").Key("FLOAT64").MustFloat64()
v = cfg.Section("").Key("INT").MustInt()
v = cfg.Section("").Key("INT64").MustInt64()
v = cfg.Section("").Key("UINT").MustUint()
v = cfg.Section("").Key("UINT64").MustUint64()
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
// Methods starting with Must also accept one argument as the default value
// to use when the key is not found or the value fails to parse to the given type.
// The exception is MustString, which always requires a default value.
v = cfg.Section("").Key("String").MustString("default")
v = cfg.Section("").Key("BOOL").MustBool(true)
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
v = cfg.Section("").Key("INT").MustInt(10)
v = cfg.Section("").Key("INT64").MustInt64(99)
v = cfg.Section("").Key("UINT").MustUint(3)
v = cfg.Section("").Key("UINT64").MustUint64(6)
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
```
What if my value is three lines long?
```ini
[advance]
ADDRESS = """404 road,
NotFound, State, 5000
Earth"""
```
Not a problem!
```go
cfg.Section("advance").Key("ADDRESS").String()
/* --- start ---
404 road,
NotFound, State, 5000
Earth
------ end --- */
```
That's cool, how about continuation lines?
```ini
[advance]
two_lines = how about \
continuation lines?
lots_of_lines = 1 \
2 \
3 \
4
```
Piece of cake!
```go
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
```
Note that quotes (single or double) around values will be stripped:
```ini
foo = "some value" // foo: some value
bar = 'some value' // bar: some value
```
That's all? Hmm, no.
#### Helper methods of working with values
To get value with given candidates:
```go
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
```
The default value will be returned if the value of the key is not among the candidates you give, and the default value does not need to be one of the candidates.
To validate value in a given range:
```go
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
```
##### Auto-split values into a slice
To use the zero value of the type for invalid inputs:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
vals = cfg.Section("").Key("STRINGS").Strings(",")
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
vals = cfg.Section("").Key("INTS").Ints(",")
vals = cfg.Section("").Key("INT64S").Int64s(",")
vals = cfg.Section("").Key("UINTS").Uints(",")
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
vals = cfg.Section("").Key("TIMES").Times(",")
```
To exclude invalid values from the result slice:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [2.2]
vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
vals = cfg.Section("").Key("INTS").ValidInts(",")
vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
vals = cfg.Section("").Key("UINTS").ValidUints(",")
vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
vals = cfg.Section("").Key("TIMES").ValidTimes(",")
```
Or to return nothing but an error when there are invalid inputs:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> error
vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
vals = cfg.Section("").Key("INTS").StrictInts(",")
vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
vals = cfg.Section("").Key("UINTS").StrictUints(",")
vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
vals = cfg.Section("").Key("TIMES").StrictTimes(",")
```
### Save your configuration
Finally, it's time to save your configuration somewhere.
A typical way to save configuration is writing it to a file:
```go
// ...
err = cfg.SaveTo("my.ini")
err = cfg.SaveToIndent("my.ini", "\t")
```
Another way to save is to write to an `io.Writer`:
```go
// ...
cfg.WriteTo(writer)
cfg.WriteToIndent(writer, "\t")
```
## Advanced Usage
### Recursive Values
For all key values there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or in the default section; `%(<name>)s` will be replaced by the corresponding value (or an empty string if the key is not found). You can use this syntax up to 99 levels of recursion.
```ini
NAME = ini
[author]
NAME = Unknwon
GITHUB = https://github.com/%(NAME)s
[package]
FULL_NAME = github.com/go-ini/%(NAME)s
```
```go
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
```
### Parent-child Sections
You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try its parent section, and so on, until there is no parent section left.
```ini
NAME = ini
VERSION = v1
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
[package]
CLONE_URL = https://%(IMPORT_PATH)s
[package.sub]
```
```go
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
```
#### Retrieve parent keys available to a child section
```go
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
```
### Auto-increment Key Names
If a key name is `-` in the data source, it is treated as special syntax for auto-increment key names starting from 1, and every section keeps its own counter.
```ini
[features]
-: Support read/write comments of keys and sections
-: Support auto-increment of key names
-: Support load multiple files to overwrite key values
```
```go
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
```
### Map To Struct
Want a more object-oriented way to play with INI? Cool.
```ini
Name = Unknwon
age = 21
Male = true
Born = 1993-01-01T20:17:05Z
[Note]
Content = Hi is a good man!
Cities = HangZhou, Boston
```
```go
type Note struct {
Content string
Cities []string
}
type Person struct {
Name string
Age int `ini:"age"`
Male bool
Born time.Time
Note
Created time.Time `ini:"-"`
}
func main() {
cfg, err := ini.Load("path/to/ini")
// ...
p := new(Person)
err = cfg.MapTo(p)
// ...
// Things can be simpler.
err = ini.MapTo(p, "path/to/ini")
// ...
// Just map a section? Fine.
n := new(Note)
err = cfg.Section("Note").MapTo(n)
// ...
}
```
Can I have a default value for a field? Absolutely.
Assign it before you map to the struct. The value will be kept as-is if the key is not present or has the wrong type.
```go
// ...
p := &Person{
Name: "Joe",
}
// ...
```
It's really cool, but what's the point if you can't give me my file back from the struct?
### Reflect From Struct
Why not?
```go
type Embeded struct {
Dates []time.Time `delim:"|"`
Places []string
None []int
}
type Author struct {
Name string `ini:"NAME"`
Male bool
Age int
GPA float64
NeverMind string `ini:"-"`
*Embeded
}
func main() {
a := &Author{"Unknwon", true, 21, 2.8, "",
&Embeded{
[]time.Time{time.Now(), time.Now()},
[]string{"HangZhou", "Boston"},
[]int{},
}}
cfg := ini.Empty()
err = ini.ReflectFrom(cfg, a)
// ...
}
```
So, what do I get?
```ini
NAME = Unknwon
Male = true
Age = 21
GPA = 2.8
[Embeded]
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
Places = HangZhou,Boston
None =
```
#### Name Mapper
To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
There are 2 built-in name mappers:
- `AllCapsUnderscore`: converts the name to the `ALL_CAPS_UNDERSCORE` format before matching the section or key.
- `TitleUnderscore`: converts the name to the `title_underscore` format before matching the section or key.
To use them:
```go
type Info struct {
PackageName string
}
func main() {
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
// ...
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
// ...
info := new(Info)
cfg.NameMapper = ini.AllCapsUnderscore
err = cfg.MapTo(info)
// ...
}
```
The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
#### Other Notes On Map/Reflect
Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child
}
type Config struct {
City string
Parent
}
```
Example configuration:
```ini
City = Boston
[Parent]
Name = Unknwon
[Child]
Age = 21
```
What if (yes, I'm paranoid) I want the embedded struct to be in the same section? Well, all roads lead to Rome.
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child `ini:"Parent"`
}
type Config struct {
City string
Parent
}
```
Example configuration:
```ini
City = Boston
[Parent]
Name = Unknwon
Age = 21
```
## Getting Help
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
- [File An Issue](https://github.com/go-ini/ini/issues/new)
## FAQs
### What does `BlockMode` field do?
By default, the library lets you both read and write values, so it needs a lock to keep your data safe. But in cases where you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
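For example, for read-only use (the file name is a placeholder):
```go
cfg, err := ini.Load("app.ini")
if err != nil {
	// handle error
}
cfg.BlockMode = false // only safe while no goroutine writes to cfg
```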
### Why another INI library?
Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I would like to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
To make those changes I had to break the API, so it is safer to keep it in another place and start using `gopkg.in` to version the package at this time. (PS: the import path is shorter, too.)
## License
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

625
vendor/github.com/go-ini/ini/README_ZH.md

@ -0,0 +1,625 @@
本包提供了 Go 语言中读写 INI 文件的功能。
## 功能特性
- 支持覆盖加载多个数据源(`[]byte` 或文件)
- 支持递归读取键值
- 支持读取父子分区
- 支持读取自增键名
- 支持读取多行的键值
- 支持大量辅助方法
- 支持在读取时直接转换为 Go 语言类型
- 支持读取和 **写入** 分区和键的注释
- 轻松操作分区、键值和注释
- 在保存文件时分区和键值会保持原有的顺序
## 下载安装
使用一个特定版本:
go get gopkg.in/ini.v1
使用最新版:
go get github.com/go-ini/ini
如需更新请添加 `-u` 选项。
### 测试安装
如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
go get -t gopkg.in/ini.v1
如需更新请添加 `-u` 选项。
## 开始使用
### 从数据源加载
一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
```go
cfg, err := ini.Load([]byte("raw data"), "filename")
```
或者从一个空白的文件开始:
```go
cfg := ini.Empty()
```
当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
```go
err := cfg.Append("other file", []byte("other raw data"))
```
当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
```go
cfg, err := ini.LooseLoad("filename", "filename_404")
```
更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
### 操作分区(Section)
获取指定分区:
```go
section, err := cfg.GetSection("section name")
```
如果您想要获取默认分区,则可以用空字符串代替分区名:
```go
section, err := cfg.GetSection("")
```
当您非常确定某个分区是存在的,可以使用以下简便方法:
```go
section := cfg.Section("")
```
如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
创建一个分区:
```go
err := cfg.NewSection("new section")
```
获取所有分区对象或名称:
```go
sections := cfg.Sections()
names := cfg.SectionStrings()
```
### 操作键(Key)
获取某个分区下的键:
```go
key, err := cfg.Section("").GetKey("key name")
```
和分区一样,您也可以直接获取键而忽略错误处理:
```go
key := cfg.Section("").Key("key name")
```
判断某个键是否存在:
```go
yes := cfg.Section("").HasKey("key name")
```
创建一个新的键:
```go
err := cfg.Section("").NewKey("name", "value")
```
获取分区下的所有键或键名:
```go
keys := cfg.Section("").Keys()
names := cfg.Section("").KeyStrings()
```
获取分区下的所有键值对的克隆:
```go
hash := cfg.Section("").KeysHash()
```
### 操作键值(Value)
获取一个类型为字符串(string)的值:
```go
val := cfg.Section("").Key("key name").String()
```
获取值的同时通过自定义函数进行处理验证:
```go
val := cfg.Section("").Key("key name").Validate(func(in string) string {
if len(in) == 0 {
return "default"
}
return in
})
```
如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
```go
val := cfg.Section("").Key("key name").Value()
```
判断某个原值是否存在:
```go
yes := cfg.Section("").HasValue("test value")
```
获取其它类型的值:
```go
// 布尔值的规则:
// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
v, err = cfg.Section("").Key("BOOL").Bool()
v, err = cfg.Section("").Key("FLOAT64").Float64()
v, err = cfg.Section("").Key("INT").Int()
v, err = cfg.Section("").Key("INT64").Int64()
v, err = cfg.Section("").Key("UINT").Uint()
v, err = cfg.Section("").Key("UINT64").Uint64()
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
v = cfg.Section("").Key("BOOL").MustBool()
v = cfg.Section("").Key("FLOAT64").MustFloat64()
v = cfg.Section("").Key("INT").MustInt()
v = cfg.Section("").Key("INT64").MustInt64()
v = cfg.Section("").Key("UINT").MustUint()
v = cfg.Section("").Key("UINT64").MustUint64()
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
// 当键不存在或者转换失败时,则会直接返回该默认值。
// 但是,MustString 方法必须传递一个默认值。
v = cfg.Section("").Key("String").MustString("default")
v = cfg.Section("").Key("BOOL").MustBool(true)
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
v = cfg.Section("").Key("INT").MustInt(10)
v = cfg.Section("").Key("INT64").MustInt64(99)
v = cfg.Section("").Key("UINT").MustUint(3)
v = cfg.Section("").Key("UINT64").MustUint64(6)
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
```
如果我的值有好多行怎么办?
```ini
[advance]
ADDRESS = """404 road,
NotFound, State, 5000
Earth"""
```
嗯哼?小 case!
```go
cfg.Section("advance").Key("ADDRESS").String()
/* --- start ---
404 road,
NotFound, State, 5000
Earth
------ end --- */
```
赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
```ini
[advance]
two_lines = how about \
continuation lines?
lots_of_lines = 1 \
2 \
3 \
4
```
简直是小菜一碟!
```go
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
```
需要注意的是,值两侧的单引号会被自动剔除:
```ini
foo = "some value" // foo: some value
bar = 'some value' // bar: some value
```
这就是全部了?哈哈,当然不是。
#### 操作键值的辅助方法
获取键值时设定候选值:
```go
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
```
如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
验证获取的值是否在指定范围内:
```go
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
```
##### 自动分割键值到切片(slice)
当存在无效输入时,使用零值代替:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
vals = cfg.Section("").Key("STRINGS").Strings(",")
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
vals = cfg.Section("").Key("INTS").Ints(",")
vals = cfg.Section("").Key("INT64S").Int64s(",")
vals = cfg.Section("").Key("UINTS").Uints(",")
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
vals = cfg.Section("").Key("TIMES").Times(",")
```
从结果切片中剔除无效输入:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [2.2]
vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
vals = cfg.Section("").Key("INTS").ValidInts(",")
vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
vals = cfg.Section("").Key("UINTS").ValidUints(",")
vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
vals = cfg.Section("").Key("TIMES").ValidTimes(",")
```
当存在无效输入时,直接返回错误:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> error
vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
vals = cfg.Section("").Key("INTS").StrictInts(",")
vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
vals = cfg.Section("").Key("UINTS").StrictUints(",")
vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
vals = cfg.Section("").Key("TIMES").StrictTimes(",")
```
### 保存配置
终于到了这个时刻,是时候保存一下配置了。
比较原始的做法是输出配置到某个文件:
```go
// ...
err = cfg.SaveTo("my.ini")
err = cfg.SaveToIndent("my.ini", "\t")
```
另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
```go
// ...
cfg.WriteTo(writer)
cfg.WriteToIndent(writer, "\t")
```
### 高级用法
#### 递归读取键值
在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
```ini
NAME = ini
[author]
NAME = Unknwon
GITHUB = https://github.com/%(NAME)s
[package]
FULL_NAME = github.com/go-ini/%(NAME)s
```
```go
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
```
#### 读取父子分区
您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
```ini
NAME = ini
VERSION = v1
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
[package]
CLONE_URL = https://%(IMPORT_PATH)s
[package.sub]
```
```go
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
```
#### 获取上级父分区下的所有键名
```go
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
```
#### 读取自增键名
如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
```ini
[features]
-: Support read/write comments of keys and sections
-: Support auto-increment of key names
-: Support load multiple files to overwrite key values
```
```go
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
```
### 映射到结构
想要使用更加面向对象的方式玩转 INI 吗?好主意。
```ini
Name = Unknwon
age = 21
Male = true
Born = 1993-01-01T20:17:05Z
[Note]
Content = Hi is a good man!
Cities = HangZhou, Boston
```
```go
type Note struct {
Content string
Cities []string
}
type Person struct {
Name string
Age int `ini:"age"`
Male bool
Born time.Time
Note
Created time.Time `ini:"-"`
}
func main() {
cfg, err := ini.Load("path/to/ini")
// ...
p := new(Person)
err = cfg.MapTo(p)
// ...
// 一切竟可以如此的简单。
err = ini.MapTo(p, "path/to/ini")
// ...
// 嗯哼?只需要映射一个分区吗?
n := new(Note)
err = cfg.Section("Note").MapTo(n)
// ...
}
```
结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
```go
// ...
p := &Person{
Name: "Joe",
}
// ...
```
这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
### 从结构反射
可是,我有说不能吗?
```go
type Embeded struct {
Dates []time.Time `delim:"|"`
Places []string
None []int
}
type Author struct {
Name string `ini:"NAME"`
Male bool
Age int
GPA float64
NeverMind string `ini:"-"`
*Embeded
}
func main() {
a := &Author{"Unknwon", true, 21, 2.8, "",
&Embeded{
[]time.Time{time.Now(), time.Now()},
[]string{"HangZhou", "Boston"},
[]int{},
}}
cfg := ini.Empty()
err = ini.ReflectFrom(cfg, a)
// ...
}
```
瞧瞧,奇迹发生了。
```ini
NAME = Unknwon
Male = true
Age = 21
GPA = 2.8
[Embeded]
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
Places = HangZhou,Boston
None =
```
#### 名称映射器(Name Mapper)
为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
目前有 2 款内置的映射器:
- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
使用方法:
```go
type Info struct{
PackageName string
}
func main() {
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
// ...
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
// ...
info := new(Info)
cfg.NameMapper = ini.AllCapsUnderscore
err = cfg.MapTo(info)
// ...
}
```
The same rules apply when using the `ini.ReflectFromWithMapper` function, as in the sketch below.
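A minimal, self-contained sketch, reusing the `Info` struct from the previous example and assuming the `gopkg.in/ini.v1` import path mentioned elsewhere in this document:
```go
package main

import (
	"os"

	"gopkg.in/ini.v1"
)

type Info struct {
	PackageName string
}

func main() {
	cfg := ini.Empty()
	// TitleUnderscore turns the field name "PackageName" into "package_name".
	if err := ini.ReflectFromWithMapper(cfg, &Info{PackageName: "ini"}, ini.TitleUnderscore); err != nil {
		panic(err)
	}
	cfg.WriteTo(os.Stdout) // Output: package_name = ini
}
```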
#### Other notes on map/reflect
Any embedded struct is treated as its own section by default, and no parent-child section relationship is created automatically:
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child
}
type Config struct {
City string
Parent
}
```
Example configuration file:
```ini
City = Boston
[Parent]
Name = Unknwon
[Child]
Age = 21
```
Fine, but what if I want the embedded struct to end up in the same section? All right, all right, have it your way!
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child `ini:"Parent"`
}
type Config struct {
City string
Parent
}
```
Example configuration file:
```ini
City = Boston
[Parent]
Name = Unknwon
Age = 21
```
## Getting help
- [API documentation](https://gowalker.org/gopkg.in/ini.v1)
- [File an issue](https://github.com/go-ini/ini/issues/new)
## FAQs
### What is the `BlockMode` field?
By default, this library uses a lock to keep data safe during read and write operations. In some situations, however, you are certain you will only be reading. In that case you can set `cfg.BlockMode = false` to boost read performance by roughly **50-70%**, as in the sketch below.
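A minimal sketch of a read-only setup (the file name and key are illustrative):
```go
cfg, err := ini.Load("app.ini")
// ...
// From here on we only read from cfg, so the internal lock
// can safely be turned off for faster reads.
cfg.BlockMode = false
name := cfg.Section("").Key("NAME").String()
// ...
```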
### Why write another INI parsing library?
Many people use my [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted code with a more Go-idiomatic style. On top of that, when you set `cfg.BlockMode = false`, there is a performance gain of roughly **10-30%**.
Making those changes meant breaking the API, so starting a new repository was the safest option. Besides, this library is versioned directly through `gopkg.in`. (The real reason is that the import path is shorter.)

465
vendor/github.com/go-ini/ini/ini.go

@ -0,0 +1,465 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package ini provides INI file read and write functionality in Go.
package ini
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
const (
// Name for default section. You can use this constant or the string literal.
// In most cases, an empty string is all you need to access the section.
DEFAULT_SECTION = "DEFAULT"
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
_VERSION = "1.12.0"
)
// Version returns current package version literal.
func Version() string {
return _VERSION
}
var (
// Delimiter to determine or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows
// at package init time.
LineBreak = "\n"
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
// Indicate whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// Explicitly write DEFAULT section header
DefaultHeader = false
)
func init() {
if runtime.GOOS == "windows" {
LineBreak = "\r\n"
}
}
func inSlice(str string, s []string) bool {
for _, v := range s {
if str == v {
return true
}
}
return false
}
// dataSource is an interface that returns an object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
type bytesReadCloser struct {
reader io.Reader
}
func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
return rc.reader.Read(p)
}
func (rc *bytesReadCloser) Close() error {
return nil
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return &bytesReadCloser{bytes.NewReader(s.data)}, nil
}
// File represents a combination of one or more INI files in memory.
type File struct {
// BlockMode indicates whether to protect data with a lock; it can be disabled when only reading.
BlockMode bool
// Make sure data is safe in multiple goroutines.
lock sync.RWMutex
// Allow combination of multiple data sources.
dataSources []dataSource
// Actual data is stored here.
sections map[string]*Section
// To keep data in order.
sectionList []string
// Whether the parser should ignore nonexistent files or return error.
looseMode bool
NameMapper
}
// newFile initializes File object with given data sources.
func newFile(dataSources []dataSource, looseMode bool) *File {
return &File{
BlockMode: true,
dataSources: dataSources,
sections: make(map[string]*Section),
sectionList: make([]string, 0, 10),
looseMode: looseMode,
}
}
func parseDataSource(source interface{}) (dataSource, error) {
switch s := source.(type) {
case string:
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
}
}
func loadSources(looseMode bool, source interface{}, others ...interface{}) (_ *File, err error) {
sources := make([]dataSource, len(others)+1)
sources[0], err = parseDataSource(source)
if err != nil {
return nil, err
}
for i := range others {
sources[i+1], err = parseDataSource(others[i])
if err != nil {
return nil, err
}
}
f := newFile(sources, looseMode)
if err = f.Reload(); err != nil {
return nil, err
}
return f, nil
}
// Load loads and parses from INI data sources.
// Arguments can be a mix of file names (string) and raw data ([]byte).
// It will return error if list contains nonexistent files.
func Load(source interface{}, others ...interface{}) (*File, error) {
return loadSources(false, source, others...)
}
// LooseLoad has exactly the same functionality as the Load function
// except it ignores nonexistent files instead of returning an error.
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
return loadSources(true, source, others...)
}
// Empty returns an empty file object.
func Empty() *File {
// Ignore error here because we are sure our data is good.
f, _ := Load([]byte(""))
return f
}
// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
return nil, errors.New("error creating new section: empty section name")
}
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if inSlice(name, f.sectionList) {
return f.sections[name], nil
}
f.sectionList = append(f.sectionList, name)
f.sections[name] = newSection(f, name)
return f.sections[name], nil
}
// NewSections creates a list of sections.
func (f *File) NewSections(names ...string) (err error) {
for _, name := range names {
if _, err = f.NewSection(name); err != nil {
return err
}
}
return nil
}
// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
if len(name) == 0 {
name = DEFAULT_SECTION
}
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sec := f.sections[name]
if sec == nil {
return nil, fmt.Errorf("section '%s' does not exist", name)
}
return sec, nil
}
// Section assumes named section exists and returns a zero-value when not.
func (f *File) Section(name string) *Section {
sec, err := f.GetSection(name)
if err != nil {
// Note: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
sec, _ = f.NewSection(name)
return sec
}
return sec
}
// Sections returns a list of Sections.
func (f *File) Sections() []*Section {
sections := make([]*Section, len(f.sectionList))
for i := range f.sectionList {
sections[i] = f.Section(f.sectionList[i])
}
return sections
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
copy(list, f.sectionList)
return list
}
// DeleteSection deletes a section.
func (f *File) DeleteSection(name string) {
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if len(name) == 0 {
name = DEFAULT_SECTION
}
for i, s := range f.sectionList {
if s == name {
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
delete(f.sections, name)
return
}
}
}
func (f *File) reload(s dataSource) error {
r, err := s.ReadCloser()
if err != nil {
return err
}
defer r.Close()
return f.parse(r)
}
// Reload reloads and parses all data sources.
func (f *File) Reload() (err error) {
for _, s := range f.dataSources {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.looseMode {
f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
}
return nil
}
// Append appends one or more data sources and reloads automatically.
func (f *File) Append(source interface{}, others ...interface{}) error {
ds, err := parseDataSource(source)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
for _, s := range others {
ds, err = parseDataSource(s)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
}
return f.Reload()
}
// WriteToIndent writes content into io.Writer with given indention.
// If PrettyFormat has been set to be true,
// it will align "=" sign with spaces under each section.
func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
equalSign := "="
if PrettyFormat {
equalSign = " = "
}
// Use a buffer so the target stays untouched until encoding is finished.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
sec := f.Section(sname)
if len(sec.Comment) > 0 {
if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
sec.Comment = "; " + sec.Comment
}
if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
return 0, err
}
}
if i > 0 || DefaultHeader {
if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return 0, err
}
} else {
// Write nothing if default section is empty
if len(sec.keyList) == 0 {
continue
}
}
// Count and generate alignment length and buffer spaces
alignLength := 0
if PrettyFormat {
for i := 0; i < len(sec.keyList); i++ {
if len(sec.keyList[i]) > alignLength {
alignLength = len(sec.keyList[i])
}
}
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
if key.Comment[0] != '#' && key.Comment[0] != ';' {
key.Comment = "; " + key.Comment
}
if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
return 0, err
}
}
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
switch {
case key.isAutoIncr:
kname = "-"
case strings.ContainsAny(kname, "\"=:"):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
if _, err = buf.WriteString(kname); err != nil {
return 0, err
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
val := key.value
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
return 0, err
}
}
// Put a line between sections
if _, err = buf.WriteString(LineBreak); err != nil {
return 0, err
}
}
return buf.WriteTo(w)
}
// WriteTo writes file content into io.Writer.
func (f *File) WriteTo(w io.Writer) (int64, error) {
return f.WriteToIndent(w, "")
}
// SaveToIndent writes content to file system with given value indention.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because we are truncating with os.Create,
// it's safer to save to a temporary location and rename it after we are done.
tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
defer os.Remove(tmpPath)
fw, err := os.Create(tmpPath)
if err != nil {
return err
}
if _, err = f.WriteToIndent(fw, indent); err != nil {
fw.Close()
return err
}
fw.Close()
// Remove old file and rename the new one.
os.Remove(filename)
return os.Rename(tmpPath, filename)
}
// SaveTo writes content to file system.
func (f *File) SaveTo(filename string) error {
return f.SaveToIndent(filename, "")
}

616
vendor/github.com/go-ini/ini/key.go

@ -0,0 +1,616 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
"strconv"
"strings"
"time"
)
// Key represents a key under a section.
type Key struct {
s *Section
Comment string
name string
value string
isAutoIncr bool
}
// Name returns name of key.
func (k *Key) Name() string {
return k.name
}
// Value returns raw value of key for performance purposes.
func (k *Key) Value() string {
return k.value
}
// String returns string representation of value.
func (k *Key) String() string {
val := k.value
if strings.Index(val, "%") == -1 {
return val
}
for i := 0; i < _DEPTH_VALUES; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
break
}
// Take off leading '%(' and trailing ')s'.
noption := strings.TrimLeft(vr, "%(")
noption = strings.TrimRight(noption, ")s")
// Search in the same section.
nk, err := k.s.GetKey(noption)
if err != nil {
// Search again in default section.
nk, _ = k.s.f.Section("").GetKey(noption)
}
// Substitute with the referenced value, or an empty string if the key
// was not found anywhere (guards against a nil dereference).
nv := ""
if nk != nil {
nv = nk.value
}
val = strings.Replace(val, vr, nv, -1)
}
return val
}
// Validate accepts a validate function which can
// return a modified result as the key value.
func (k *Key) Validate(fn func(string) string) string {
return fn(k.String())
}
// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
return true, nil
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
return false, nil
}
return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
}
// Bool returns bool type value.
func (k *Key) Bool() (bool, error) {
return parseBool(k.String())
}
// Float64 returns float64 type value.
func (k *Key) Float64() (float64, error) {
return strconv.ParseFloat(k.String(), 64)
}
// Int returns int type value.
func (k *Key) Int() (int, error) {
return strconv.Atoi(k.String())
}
// Int64 returns int64 type value.
func (k *Key) Int64() (int64, error) {
return strconv.ParseInt(k.String(), 10, 64)
}
// Uint returns uint type value.
func (k *Key) Uint() (uint, error) {
u, e := strconv.ParseUint(k.String(), 10, 64)
return uint(u), e
}
// Uint64 returns uint64 type value.
func (k *Key) Uint64() (uint64, error) {
return strconv.ParseUint(k.String(), 10, 64)
}
// Duration returns time.Duration type value.
func (k *Key) Duration() (time.Duration, error) {
return time.ParseDuration(k.String())
}
// TimeFormat parses with given format and returns time.Time type value.
func (k *Key) TimeFormat(format string) (time.Time, error) {
return time.Parse(format, k.String())
}
// Time parses with RFC3339 format and returns time.Time type value.
func (k *Key) Time() (time.Time, error) {
return k.TimeFormat(time.RFC3339)
}
// MustString returns default value if key value is empty.
func (k *Key) MustString(defaultVal string) string {
val := k.String()
if len(val) == 0 {
return defaultVal
}
return val
}
// MustBool always returns value without error,
// it returns false if error occurs.
func (k *Key) MustBool(defaultVal ...bool) bool {
val, err := k.Bool()
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustFloat64 always returns value without error,
// it returns 0.0 if error occurs.
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
val, err := k.Float64()
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustInt always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt(defaultVal ...int) int {
val, err := k.Int()
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustInt64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt64(defaultVal ...int64) int64 {
val, err := k.Int64()
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustUint always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint(defaultVal ...uint) uint {
val, err := k.Uint()
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustUint64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
val, err := k.Uint64()
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustDuration always returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
val, err := k.Duration()
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustTimeFormat always parses with given format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
val, err := k.TimeFormat(format)
if len(defaultVal) > 0 && err != nil {
return defaultVal[0]
}
return val
}
// MustTime always parses with RFC3339 format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
return k.MustTimeFormat(time.RFC3339, defaultVal...)
}
// In always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) In(defaultVal string, candidates []string) string {
val := k.String()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InFloat64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
val := k.MustFloat64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt(defaultVal int, candidates []int) int {
val := k.MustInt()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
val := k.MustInt64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
val := k.MustUint()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
val := k.MustUint64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTimeFormat always parses with given format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
val := k.MustTimeFormat(format)
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTime always parses with RFC3339 format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
}
// RangeFloat64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
val := k.MustFloat64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt(defaultVal, min, max int) int {
val := k.MustInt()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
val := k.MustInt64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeTimeFormat checks if value with given format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
val := k.MustTimeFormat(format)
if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
return defaultVal
}
return val
}
// RangeTime checks if value with RFC3339 format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}
// Strings returns list of string divided by given delimiter.
func (k *Key) Strings(delim string) []string {
str := k.String()
if len(str) == 0 {
return []string{}
}
vals := strings.Split(str, delim)
for i := range vals {
vals[i] = strings.TrimSpace(vals[i])
}
return vals
}
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
vals, _ := k.getInts(delim, true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
vals, _ := k.getUints(delim, true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, true, false)
return vals
}
// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) Times(delim string) []time.Time {
return k.TimesFormat(time.RFC3339, delim)
}
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not a float, then
// it will not be included in the result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not an integer, then it will
// not be included in the result list.
func (k *Key) ValidInts(delim string) []int {
vals, _ := k.getInts(delim, false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not a 64-bit integer,
// then it will not be included in the result list.
func (k *Key) ValidInt64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not an unsigned integer,
// then it will not be included in the result list.
func (k *Key) ValidUints(delim string) []uint {
vals, _ := k.getUints(delim, false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not a 64-bit unsigned
// integer, then it will not be included in the result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, false, false)
return vals
}
// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimes(delim string) []time.Time {
return k.ValidTimesFormat(time.RFC3339, delim)
}
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
return k.getFloat64s(delim, false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
return k.getInts(delim, false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
return k.getInt64s(delim, false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
return k.getUints(delim, false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
return k.getUint64s(delim, false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
return k.getTimesFormat(format, delim, false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
// getFloat64s returns list of float64 divided by given delimiter.
func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
strs := k.Strings(delim)
vals := make([]float64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseFloat(str, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getInts returns list of int divided by given delimiter.
func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
strs := k.Strings(delim)
vals := make([]int, 0, len(strs))
for _, str := range strs {
val, err := strconv.Atoi(str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getInt64s returns list of int64 divided by given delimiter.
func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
strs := k.Strings(delim)
vals := make([]int64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseInt(str, 10, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getUints returns list of uint divided by given delimiter.
func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
strs := k.Strings(delim)
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 0)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, uint(val))
}
}
return vals, nil
}
// getUint64s returns list of uint64 divided by given delimiter.
func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
strs := k.Strings(delim)
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
strs := k.Strings(delim)
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
val, err := time.Parse(format, str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// SetValue changes key value.
func (k *Key) SetValue(v string) {
if k.s.f.BlockMode {
k.s.f.lock.Lock()
defer k.s.f.lock.Unlock()
}
k.value = v
k.s.keysHash[k.name] = v
}

312
vendor/github.com/go-ini/ini/parser.go

@ -0,0 +1,312 @@
// Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"unicode"
)
type tokenType int
const (
_TOKEN_INVALID tokenType = iota
_TOKEN_COMMENT
_TOKEN_SECTION
_TOKEN_KEY
)
type parser struct {
buf *bufio.Reader
isEOF bool
count int
comment *bytes.Buffer
}
func newParser(r io.Reader) *parser {
return &parser{
buf: bufio.NewReader(r),
count: 1,
comment: &bytes.Buffer{},
}
}
// BOM handles header of BOM-UTF8 format.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 3 {
return nil
} else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
p.buf.Read(mask)
}
return nil
}
func (p *parser) readUntil(delim byte) ([]byte, error) {
data, err := p.buf.ReadBytes(delim)
if err != nil {
if err == io.EOF {
p.isEOF = true
} else {
return nil, err
}
}
return data, nil
}
func cleanComment(in []byte) ([]byte, bool) {
i := bytes.IndexAny(in, "#;")
if i == -1 {
return nil, false
}
return in[i:], true
}
func readKeyName(in []byte) (string, int, error) {
line := string(in)
// Check if key name surrounded by quotes.
var keyQuote string
if line[0] == '"' {
if len(line) > 6 && string(line[0:3]) == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
// Get out key name
endIdx := -1
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
pos := strings.Index(line[startIdx:], keyQuote)
if pos == -1 {
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
}
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], "=:")
if i < 0 {
return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
}
endIdx = pos + i
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, "=:")
if endIdx < 0 {
return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
}
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := string(data)
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
comment, has := cleanComment([]byte(next[pos:]))
if has {
p.comment.Write(bytes.TrimSpace(comment))
}
break
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
}
}
return val, nil
}
func (p *parser) readContinuationLines(val string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := strings.TrimSpace(string(data))
if len(next) == 0 {
break
}
val += next
if val[len(val)-1] != '\\' {
break
}
val = val[:len(val)-1]
}
return val, nil
}
// hasSurroundedQuote checks if and only if the first and last characters
// are quotes \" or \'.
// It returns false if any other part also contains the same kind of quotes.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
}
var valQuote string
if len(line) > 3 && string(line[0:3]) == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
}
if len(valQuote) > 0 {
startIdx := len(valQuote)
pos := strings.LastIndex(line[startIdx:], valQuote)
// Check for multi-line value
if pos == -1 {
return p.readMultilines(line, line[startIdx:], valQuote)
}
return line[startIdx : pos+startIdx], nil
}
// Won't be able to reach here if value only contains whitespace.
line = strings.TrimSpace(line)
// Check continuation lines
if line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
// Trim single quotes
if hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"') {
line = line[1 : len(line)-1]
}
return line, nil
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader)
if err = p.BOM(); err != nil {
return fmt.Errorf("BOM: %v", err)
}
// Ignore error because default section name is never empty string.
section, _ := f.NewSection(DEFAULT_SECTION)
var line []byte
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
return err
}
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
if len(line) == 0 {
continue
}
// Comments
if line[0] == '#' || line[0] == ';' {
// Note: we keep the trailing line break here because it is needed
// when a second comment line is appended;
// it is trimmed once when the comment is assigned to a value.
p.comment.Write(line)
continue
}
// Section
if line[0] == '[' {
// Read to the next ']' (TODO: support quoted strings)
closeIdx := bytes.IndexByte(line, ']')
if closeIdx == -1 {
return fmt.Errorf("unclosed section: %s", line)
}
section, err = f.NewSection(string(line[1:closeIdx]))
if err != nil {
return err
}
comment, has := cleanComment(line[closeIdx+1:])
if has {
p.comment.Write(comment)
}
section.Comment = strings.TrimSpace(p.comment.String())
// Reset auto-increment counter and comments
p.comment.Reset()
p.count = 1
continue
}
kname, offset, err := readKeyName(line)
if err != nil {
return err
}
// Auto increment.
isAutoIncr := false
if kname == "-" {
isAutoIncr = true
kname = "#" + strconv.Itoa(p.count)
p.count++
}
key, err := section.NewKey(kname, "")
if err != nil {
return err
}
key.isAutoIncr = isAutoIncr
value, err := p.readValue(line[offset:])
if err != nil {
return err
}
key.SetValue(value)
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
}
return nil
}

197
vendor/github.com/go-ini/ini/section.go

@ -0,0 +1,197 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"errors"
"fmt"
"strings"
)
// Section represents a config section.
type Section struct {
f *File
Comment string
name string
keys map[string]*Key
keyList []string
keysHash map[string]string
}
func newSection(f *File, name string) *Section {
return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
}
// Name returns name of Section.
func (s *Section) Name() string {
return s.name
}
// NewKey creates a new key in the given section.
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
}
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
if inSlice(name, s.keyList) {
s.keys[name].value = val
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
s.keys[name] = &Key{s, "", name, val, false}
s.keysHash[name] = val
return s.keys[name], nil
}
// GetKey returns key in section by given name.
func (s *Section) GetKey(name string) (*Key, error) {
// FIXME: change to section level lock?
if s.f.BlockMode {
s.f.lock.RLock()
}
key := s.keys[name]
if s.f.BlockMode {
s.f.lock.RUnlock()
}
if key == nil {
// Check if it is a child-section.
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
return sec.GetKey(name)
} else {
break
}
}
return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
}
return key, nil
}
// HasKey returns true if section contains a key with given name.
func (s *Section) HasKey(name string) bool {
key, _ := s.GetKey(name)
return key != nil
}
// Haskey is a backwards-compatible name for HasKey.
func (s *Section) Haskey(name string) bool {
return s.HasKey(name)
}
// HasValue returns true if section contains given raw value.
func (s *Section) HasValue(value string) bool {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
for _, k := range s.keys {
if value == k.value {
return true
}
}
return false
}
// Key assumes named Key exists in section and returns a zero-value when not.
func (s *Section) Key(name string) *Key {
key, err := s.GetKey(name)
if err != nil {
// It's OK here because the only possible error is empty key name,
// but if it's empty, this piece of code won't be executed.
key, _ = s.NewKey(name, "")
return key
}
return key
}
// Keys returns list of keys of section.
func (s *Section) Keys() []*Key {
keys := make([]*Key, len(s.keyList))
for i := range s.keyList {
keys[i] = s.Key(s.keyList[i])
}
return keys
}
// ParentKeys returns list of keys of parent section.
func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
parentKeys = append(parentKeys, sec.Keys()...)
} else {
break
}
}
return parentKeys
}
// KeyStrings returns list of key names of section.
func (s *Section) KeyStrings() []string {
list := make([]string, len(s.keyList))
copy(list, s.keyList)
return list
}
// KeysHash returns keys hash consisting of names and values.
func (s *Section) KeysHash() map[string]string {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
hash := map[string]string{}
for key, value := range s.keysHash {
hash[key] = value
}
return hash
}
// DeleteKey deletes a key from section.
func (s *Section) DeleteKey(name string) {
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
for i, k := range s.keyList {
if k == name {
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
delete(s.keys, name)
return
}
}
}

351
vendor/github.com/go-ini/ini/struct.go

@ -0,0 +1,351 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"reflect"
"time"
"unicode"
)
// NameMapper represents an ini tag name mapper.
type NameMapper func(string) string
// Built-in name getters.
var (
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
AllCapsUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
}
newstr = append(newstr, unicode.ToUpper(chr))
}
return string(newstr)
}
// TitleUnderscore converts to format title_underscore.
TitleUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
chr -= ('A' - 'a')
}
newstr = append(newstr, chr)
}
return string(newstr)
}
)
func (s *Section) parseFieldName(raw, actual string) string {
if len(actual) > 0 {
return actual
}
if s.f.NameMapper != nil {
return s.f.NameMapper(raw)
}
return raw
}
func parseDelim(actual string) string {
if len(actual) > 0 {
return actual
}
return ","
}
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setWithProperType sets proper value to field based on its type,
// but it does not return an error on parse failure,
// because we want to keep the default value that is already assigned to the struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {
return nil
}
field.SetString(key.String())
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
return nil
}
field.SetBool(boolVal)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
durationVal, err := key.Duration()
// Skip zero value
if err == nil && int(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
return nil
}
intVal, err := key.Int64()
if err != nil || intVal == 0 {
return nil
}
field.SetInt(intVal)
// byte is an alias for uint8, so supporting uint8 breaks support for byte
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
durationVal, err := key.Duration()
if err == nil {
field.Set(reflect.ValueOf(durationVal))
return nil
}
uintVal, err := key.Uint64()
if err != nil {
return nil
}
field.SetUint(uintVal)
case reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return nil
}
field.SetFloat(floatVal)
case reflectTime:
timeVal, err := key.Time()
if err != nil {
return nil
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
vals := key.Strings(delim)
numVals := len(vals)
if numVals == 0 {
return nil
}
sliceOf := field.Type().Elem().Kind()
var times []time.Time
if sliceOf == reflectTime {
times = key.Times(delim)
}
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
switch sliceOf {
case reflectTime:
slice.Index(i).Set(reflect.ValueOf(times[i]))
default:
slice.Index(i).Set(reflect.ValueOf(vals[i]))
}
}
field.Set(slice)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
func (s *Section) mapTo(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
fieldName := s.parseFieldName(tpField.Name, tag)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
isStruct := tpField.Type.Kind() == reflect.Struct
if isAnonymous {
field.Set(reflect.New(tpField.Type.Elem()))
}
if isAnonymous || isStruct {
if sec, err := s.f.GetSection(fieldName); err == nil {
if err = sec.mapTo(field); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
continue
}
}
if key, err := s.GetKey(fieldName); err == nil {
if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}
}
return nil
}
// MapTo maps section to given struct.
func (s *Section) MapTo(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
}
return s.mapTo(val)
}
// MapTo maps file to given struct.
func (f *File) MapTo(v interface{}) error {
return f.Section("").MapTo(v)
}
// MapToWithMapper maps data sources to given struct with name mapper.
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.MapTo(v)
}
// MapTo maps data sources to given struct.
func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}
// reflectWithProperType does the opposite of setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
switch t.Kind() {
case reflect.String:
key.SetValue(field.String())
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Float64,
reflectTime:
key.SetValue(fmt.Sprint(field))
case reflect.Slice:
vals := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
var buf bytes.Buffer
isTime := fmt.Sprint(field.Type()) == "[]time.Time"
for i := 0; i < field.Len(); i++ {
if isTime {
buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
} else {
buf.WriteString(fmt.Sprint(vals.Index(i)))
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-1])
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
func (s *Section) reflectFrom(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
fieldName := s.parseFieldName(tpField.Name, tag)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
(tpField.Type.Kind() == reflect.Struct) {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
if err != nil {
// Note: fieldName can never be empty here, ignore error.
sec, _ = s.f.NewSection(fieldName)
}
if err = sec.reflectFrom(field); err != nil {
return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
}
continue
}
// Note: Same reason as section.
key, err := s.GetKey(fieldName)
if err != nil {
key, _ = s.NewKey(fieldName, "")
}
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
}
}
return nil
}
// ReflectFrom reflects section from given struct.
func (s *Section) ReflectFrom(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot reflect from non-pointer struct")
}
return s.reflectFrom(val)
}
// ReflectFrom reflects file from given struct.
func (f *File) ReflectFrom(v interface{}) error {
return f.Section("").ReflectFrom(v)
}
// ReflectFromWithMapper reflects data sources from given struct with name mapper.
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
cfg.NameMapper = mapper
return cfg.ReflectFrom(v)
}
// ReflectFrom reflects data sources from given struct.
func ReflectFrom(cfg *File, v interface{}) error {
return ReflectFromWithMapper(cfg, v, nil)
}

13
vendor/github.com/jmespath/go-jmespath/LICENSE

@ -0,0 +1,13 @@
Copyright 2015 James Saryerwinnie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

44
vendor/github.com/jmespath/go-jmespath/Makefile

@ -0,0 +1,44 @@
CMD = jpgo
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " test to run all the tests"
@echo " build to build the library and jp executable"
@echo " generate to run codegen"
generate:
go generate ./...
build:
rm -f $(CMD)
go build ./...
rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
mv cmd/$(CMD)/$(CMD) .
test:
go test -v ./...
check:
go vet ./...
@echo "golint ./..."
@lint=`golint ./...`; \
lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
echo "$$lint"; \
if [ "$$lint" != "" ]; then exit 1; fi
htmlc:
go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
buildfuzz:
go-fuzz-build github.com/jmespath/go-jmespath/fuzz
fuzz: buildfuzz
go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
bench:
go test -bench . -cpuprofile cpu.out
pprof-cpu:
go tool pprof ./go-jmespath.test ./cpu.out

7
vendor/github.com/jmespath/go-jmespath/README.md

@ -0,0 +1,7 @@
# go-jmespath - A JMESPath implementation in Go
[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
See http://jmespath.org for more info.

49
vendor/github.com/jmespath/go-jmespath/api.go

@ -0,0 +1,49 @@
package jmespath
import "strconv"
// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
// safe for concurrent use by multiple goroutines.
type JMESPath struct {
ast ASTNode
intr *treeInterpreter
}
// Compile parses a JMESPath expression and returns, if successful, a JMESPath
// object that can be used to match against data.
func Compile(expression string) (*JMESPath, error) {
parser := NewParser()
ast, err := parser.Parse(expression)
if err != nil {
return nil, err
}
jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
return jmespath, nil
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled
// JMESPaths.
func MustCompile(expression string) *JMESPath {
jmespath, err := Compile(expression)
if err != nil {
panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
}
return jmespath
}
// Search evaluates a JMESPath expression against input data and returns the result.
func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
return jp.intr.Execute(jp.ast, data)
}
// Search evaluates a JMESPath expression against input data and returns the result.
func Search(expression string, data interface{}) (interface{}, error) {
intr := newInterpreter()
parser := NewParser()
ast, err := parser.Parse(expression)
if err != nil {
return nil, err
}
return intr.Execute(ast, data)
}

16
vendor/github.com/jmespath/go-jmespath/astnodetype_string.go

@ -0,0 +1,16 @@
// generated by stringer -type astNodeType; DO NOT EDIT
package jmespath
import "fmt"
const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
func (i astNodeType) String() string {
if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
return fmt.Sprintf("astNodeType(%d)", i)
}
return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
}

842
vendor/github.com/jmespath/go-jmespath/functions.go

@ -0,0 +1,842 @@
package jmespath
import (
"encoding/json"
"errors"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"strings"
"unicode/utf8"
)
type jpFunction func(arguments []interface{}) (interface{}, error)
type jpType string
const (
jpUnknown jpType = "unknown"
jpNumber jpType = "number"
jpString jpType = "string"
jpArray jpType = "array"
jpObject jpType = "object"
jpArrayNumber jpType = "array[number]"
jpArrayString jpType = "array[string]"
jpExpref jpType = "expref"
jpAny jpType = "any"
)
type functionEntry struct {
name string
arguments []argSpec
handler jpFunction
hasExpRef bool
}
type argSpec struct {
types []jpType
variadic bool
}
type byExprString struct {
intr *treeInterpreter
node ASTNode
items []interface{}
hasError bool
}
func (a *byExprString) Len() int {
return len(a.items)
}
func (a *byExprString) Swap(i, j int) {
a.items[i], a.items[j] = a.items[j], a.items[i]
}
func (a *byExprString) Less(i, j int) bool {
first, err := a.intr.Execute(a.node, a.items[i])
if err != nil {
a.hasError = true
// Return a dummy value.
return true
}
ith, ok := first.(string)
if !ok {
a.hasError = true
return true
}
second, err := a.intr.Execute(a.node, a.items[j])
if err != nil {
a.hasError = true
// Return a dummy value.
return true
}
jth, ok := second.(string)
if !ok {
a.hasError = true
return true
}
return ith < jth
}
type byExprFloat struct {
intr *treeInterpreter
node ASTNode
items []interface{}
hasError bool
}
func (a *byExprFloat) Len() int {
return len(a.items)
}
func (a *byExprFloat) Swap(i, j int) {
a.items[i], a.items[j] = a.items[j], a.items[i]
}
func (a *byExprFloat) Less(i, j int) bool {
first, err := a.intr.Execute(a.node, a.items[i])
if err != nil {
a.hasError = true
// Return a dummy value.
return true
}
ith, ok := first.(float64)
if !ok {
a.hasError = true
return true
}
second, err := a.intr.Execute(a.node, a.items[j])
if err != nil {
a.hasError = true
// Return a dummy value.
return true
}
jth, ok := second.(float64)
if !ok {
a.hasError = true
return true
}
return ith < jth
}
type functionCaller struct {
functionTable map[string]functionEntry
}
func newFunctionCaller() *functionCaller {
caller := &functionCaller{}
caller.functionTable = map[string]functionEntry{
"length": {
name: "length",
arguments: []argSpec{
{types: []jpType{jpString, jpArray, jpObject}},
},
handler: jpfLength,
},
"starts_with": {
name: "starts_with",
arguments: []argSpec{
{types: []jpType{jpString}},
{types: []jpType{jpString}},
},
handler: jpfStartsWith,
},
"abs": {
name: "abs",
arguments: []argSpec{
{types: []jpType{jpNumber}},
},
handler: jpfAbs,
},
"avg": {
name: "avg",
arguments: []argSpec{
{types: []jpType{jpArrayNumber}},
},
handler: jpfAvg,
},
"ceil": {
name: "ceil",
arguments: []argSpec{
{types: []jpType{jpNumber}},
},
handler: jpfCeil,
},
"contains": {
name: "contains",
arguments: []argSpec{
{types: []jpType{jpArray, jpString}},
{types: []jpType{jpAny}},
},
handler: jpfContains,
},
"ends_with": {
name: "ends_with",
arguments: []argSpec{
{types: []jpType{jpString}},
{types: []jpType{jpString}},
},
handler: jpfEndsWith,
},
"floor": {
name: "floor",
arguments: []argSpec{
{types: []jpType{jpNumber}},
},
handler: jpfFloor,
},
"map": {
name: "amp",
arguments: []argSpec{
{types: []jpType{jpExpref}},
{types: []jpType{jpArray}},
},
handler: jpfMap,
hasExpRef: true,
},
"max": {
name: "max",
arguments: []argSpec{
{types: []jpType{jpArrayNumber, jpArrayString}},
},
handler: jpfMax,
},
"merge": {
name: "merge",
arguments: []argSpec{
{types: []jpType{jpObject}, variadic: true},
},
handler: jpfMerge,
},
"max_by": {
name: "max_by",
arguments: []argSpec{
{types: []jpType{jpArray}},
{types: []jpType{jpExpref}},
},
handler: jpfMaxBy,
hasExpRef: true,
},
"sum": {
name: "sum",
arguments: []argSpec{
{types: []jpType{jpArrayNumber}},
},
handler: jpfSum,
},
"min": {
name: "min",
arguments: []argSpec{
{types: []jpType{jpArrayNumber, jpArrayString}},
},
handler: jpfMin,
},
"min_by": {
name: "min_by",
arguments: []argSpec{
{types: []jpType{jpArray}},
{types: []jpType{jpExpref}},
},
handler: jpfMinBy,
hasExpRef: true,
},
"type": {
name: "type",
arguments: []argSpec{
{types: []jpType{jpAny}},
},
handler: jpfType,
},
"keys": {
name: "keys",
arguments: []argSpec{
{types: []jpType{jpObject}},
},
handler: jpfKeys,
},
"values": {
name: "values",
arguments: []argSpec{
{types: []jpType{jpObject}},
},
handler: jpfValues,
},
"sort": {
name: "sort",
arguments: []argSpec{
{types: []jpType{jpArrayString, jpArrayNumber}},
},
handler: jpfSort,
},
"sort_by": {
name: "sort_by",
arguments: []argSpec{
{types: []jpType{jpArray}},
{types: []jpType{jpExpref}},
},
handler: jpfSortBy,
hasExpRef: true,
},
"join": {
name: "join",
arguments: []argSpec{
{types: []jpType{jpString}},
{types: []jpType{jpArrayString}},
},
handler: jpfJoin,
},
"reverse": {
name: "reverse",
arguments: []argSpec{
{types: []jpType{jpArray, jpString}},
},
handler: jpfReverse,
},
"to_array": {
name: "to_array",
arguments: []argSpec{
{types: []jpType{jpAny}},
},
handler: jpfToArray,
},
"to_string": {
name: "to_string",
arguments: []argSpec{
{types: []jpType{jpAny}},
},
handler: jpfToString,
},
"to_number": {
name: "to_number",
arguments: []argSpec{
{types: []jpType{jpAny}},
},
handler: jpfToNumber,
},
"not_null": {
name: "not_null",
arguments: []argSpec{
{types: []jpType{jpAny}, variadic: true},
},
handler: jpfNotNull,
},
}
return caller
}
func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
if len(e.arguments) == 0 {
return arguments, nil
}
if !e.arguments[len(e.arguments)-1].variadic {
if len(e.arguments) != len(arguments) {
return nil, errors.New("incorrect number of args")
}
for i, spec := range e.arguments {
userArg := arguments[i]
err := spec.typeCheck(userArg)
if err != nil {
return nil, err
}
}
return arguments, nil
}
if len(arguments) < len(e.arguments) {
return nil, errors.New("Invalid arity.")
}
return arguments, nil
}
func (a *argSpec) typeCheck(arg interface{}) error {
for _, t := range a.types {
switch t {
case jpNumber:
if _, ok := arg.(float64); ok {
return nil
}
case jpString:
if _, ok := arg.(string); ok {
return nil
}
case jpArray:
if isSliceType(arg) {
return nil
}
case jpObject:
if _, ok := arg.(map[string]interface{}); ok {
return nil
}
case jpArrayNumber:
if _, ok := toArrayNum(arg); ok {
return nil
}
case jpArrayString:
if _, ok := toArrayStr(arg); ok {
return nil
}
case jpAny:
return nil
case jpExpref:
if _, ok := arg.(expRef); ok {
return nil
}
}
}
return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
}
func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
entry, ok := f.functionTable[name]
if !ok {
return nil, errors.New("unknown function: " + name)
}
resolvedArgs, err := entry.resolveArgs(arguments)
if err != nil {
return nil, err
}
if entry.hasExpRef {
var extra []interface{}
extra = append(extra, intr)
resolvedArgs = append(extra, resolvedArgs...)
}
return entry.handler(resolvedArgs)
}
func jpfAbs(arguments []interface{}) (interface{}, error) {
num := arguments[0].(float64)
return math.Abs(num), nil
}
func jpfLength(arguments []interface{}) (interface{}, error) {
arg := arguments[0]
if c, ok := arg.(string); ok {
return float64(utf8.RuneCountInString(c)), nil
} else if isSliceType(arg) {
v := reflect.ValueOf(arg)
return float64(v.Len()), nil
} else if c, ok := arg.(map[string]interface{}); ok {
return float64(len(c)), nil
}
return nil, errors.New("could not compute length()")
}
func jpfStartsWith(arguments []interface{}) (interface{}, error) {
search := arguments[0].(string)
prefix := arguments[1].(string)
return strings.HasPrefix(search, prefix), nil
}
func jpfAvg(arguments []interface{}) (interface{}, error) {
// We've already type checked the value so we can safely use
// type assertions.
args := arguments[0].([]interface{})
length := float64(len(args))
numerator := 0.0
for _, n := range args {
numerator += n.(float64)
}
return numerator / length, nil
}
func jpfCeil(arguments []interface{}) (interface{}, error) {
val := arguments[0].(float64)
return math.Ceil(val), nil
}
func jpfContains(arguments []interface{}) (interface{}, error) {
search := arguments[0]
el := arguments[1]
if searchStr, ok := search.(string); ok {
if elStr, ok := el.(string); ok {
return strings.Index(searchStr, elStr) != -1, nil
}
return false, nil
}
// Otherwise this is a generic contains for []interface{}
general := search.([]interface{})
for _, item := range general {
if item == el {
return true, nil
}
}
return false, nil
}
func jpfEndsWith(arguments []interface{}) (interface{}, error) {
search := arguments[0].(string)
suffix := arguments[1].(string)
return strings.HasSuffix(search, suffix), nil
}
func jpfFloor(arguments []interface{}) (interface{}, error) {
val := arguments[0].(float64)
return math.Floor(val), nil
}
func jpfMap(arguments []interface{}) (interface{}, error) {
intr := arguments[0].(*treeInterpreter)
exp := arguments[1].(expRef)
node := exp.ref
arr := arguments[2].([]interface{})
mapped := make([]interface{}, 0, len(arr))
for _, value := range arr {
current, err := intr.Execute(node, value)
if err != nil {
return nil, err
}
mapped = append(mapped, current)
}
return mapped, nil
}
func jpfMax(arguments []interface{}) (interface{}, error) {
if items, ok := toArrayNum(arguments[0]); ok {
if len(items) == 0 {
return nil, nil
}
if len(items) == 1 {
return items[0], nil
}
best := items[0]
for _, item := range items[1:] {
if item > best {
best = item
}
}
return best, nil
}
// Otherwise we're dealing with a max() of strings.
items, _ := toArrayStr(arguments[0])
if len(items) == 0 {
return nil, nil
}
if len(items) == 1 {
return items[0], nil
}
best := items[0]
for _, item := range items[1:] {
if item > best {
best = item
}
}
return best, nil
}
func jpfMerge(arguments []interface{}) (interface{}, error) {
final := make(map[string]interface{})
for _, m := range arguments {
mapped := m.(map[string]interface{})
for key, value := range mapped {
final[key] = value
}
}
return final, nil
}
func jpfMaxBy(arguments []interface{}) (interface{}, error) {
intr := arguments[0].(*treeInterpreter)
arr := arguments[1].([]interface{})
exp := arguments[2].(expRef)
node := exp.ref
if len(arr) == 0 {
return nil, nil
} else if len(arr) == 1 {
return arr[0], nil
}
start, err := intr.Execute(node, arr[0])
if err != nil {
return nil, err
}
switch t := start.(type) {
case float64:
bestVal := t
bestItem := arr[0]
for _, item := range arr[1:] {
result, err := intr.Execute(node, item)
if err != nil {
return nil, err
}
current, ok := result.(float64)
if !ok {
return nil, errors.New("invalid type, must be number")
}
if current > bestVal {
bestVal = current
bestItem = item
}
}
return bestItem, nil
case string:
bestVal := t
bestItem := arr[0]
for _, item := range arr[1:] {
result, err := intr.Execute(node, item)
if err != nil {
return nil, err
}
current, ok := result.(string)
if !ok {
return nil, errors.New("invalid type, must be string")
}
if current > bestVal {
bestVal = current
bestItem = item
}
}
return bestItem, nil
default:
return nil, errors.New("invalid type, must be number of string")
}
}
func jpfSum(arguments []interface{}) (interface{}, error) {
items, _ := toArrayNum(arguments[0])
sum := 0.0
for _, item := range items {
sum += item
}
return sum, nil
}
func jpfMin(arguments []interface{}) (interface{}, error) {
if items, ok := toArrayNum(arguments[0]); ok {
if len(items) == 0 {
return nil, nil
}
if len(items) == 1 {
return items[0], nil
}
best := items[0]
for _, item := range items[1:] {
if item < best {
best = item
}
}
return best, nil
}
items, _ := toArrayStr(arguments[0])
if len(items) == 0 {
return nil, nil
}
if len(items) == 1 {
return items[0], nil
}
best := items[0]
for _, item := range items[1:] {
if item < best {
best = item
}
}
return best, nil
}
func jpfMinBy(arguments []interface{}) (interface{}, error) {
intr := arguments[0].(*treeInterpreter)
arr := arguments[1].([]interface{})
exp := arguments[2].(expRef)
node := exp.ref
if len(arr) == 0 {
return nil, nil
} else if len(arr) == 1 {
return arr[0], nil
}
start, err := intr.Execute(node, arr[0])
if err != nil {
return nil, err
}
if t, ok := start.(float64); ok {
bestVal := t
bestItem := arr[0]
for _, item := range arr[1:] {
result, err := intr.Execute(node, item)
if err != nil {
return nil, err
}
current, ok := result.(float64)
if !ok {
return nil, errors.New("invalid type, must be number")
}
if current < bestVal {
bestVal = current
bestItem = item
}
}
return bestItem, nil
} else if t, ok := start.(string); ok {
bestVal := t
bestItem := arr[0]
for _, item := range arr[1:] {
result, err := intr.Execute(node, item)
if err != nil {
return nil, err
}
current, ok := result.(string)
if !ok {
return nil, errors.New("invalid type, must be string")
}
if current < bestVal {
bestVal = current
bestItem = item
}
}
return bestItem, nil
} else {
return nil, errors.New("invalid type, must be number of string")
}
}
func jpfType(arguments []interface{}) (interface{}, error) {
arg := arguments[0]
if _, ok := arg.(float64); ok {
return "number", nil
}
if _, ok := arg.(string); ok {
return "string", nil
}
if _, ok := arg.([]interface{}); ok {
return "array", nil
}
if _, ok := arg.(map[string]interface{}); ok {
return "object", nil
}
if arg == nil {
return "null", nil
}
if arg == true || arg == false {
return "boolean", nil
}
return nil, errors.New("unknown type")
}
func jpfKeys(arguments []interface{}) (interface{}, error) {
arg := arguments[0].(map[string]interface{})
collected := make([]interface{}, 0, len(arg))
for key := range arg {
collected = append(collected, key)
}
return collected, nil
}
func jpfValues(arguments []interface{}) (interface{}, error) {
arg := arguments[0].(map[string]interface{})
collected := make([]interface{}, 0, len(arg))
for _, value := range arg {
collected = append(collected, value)
}
return collected, nil
}
func jpfSort(arguments []interface{}) (interface{}, error) {
if items, ok := toArrayNum(arguments[0]); ok {
d := sort.Float64Slice(items)
sort.Stable(d)
final := make([]interface{}, len(d))
for i, val := range d {
final[i] = val
}
return final, nil
}
// Otherwise we're sorting strings.
items, _ := toArrayStr(arguments[0])
d := sort.StringSlice(items)
sort.Stable(d)
final := make([]interface{}, len(d))
for i, val := range d {
final[i] = val
}
return final, nil
}
func jpfSortBy(arguments []interface{}) (interface{}, error) {
intr := arguments[0].(*treeInterpreter)
arr := arguments[1].([]interface{})
exp := arguments[2].(expRef)
node := exp.ref
if len(arr) == 0 {
return arr, nil
} else if len(arr) == 1 {
return arr, nil
}
start, err := intr.Execute(node, arr[0])
if err != nil {
return nil, err
}
if _, ok := start.(float64); ok {
sortable := &byExprFloat{intr, node, arr, false}
sort.Stable(sortable)
if sortable.hasError {
return nil, errors.New("error in sort_by comparison")
}
return arr, nil
} else if _, ok := start.(string); ok {
sortable := &byExprString{intr, node, arr, false}
sort.Stable(sortable)
if sortable.hasError {
return nil, errors.New("error in sort_by comparison")
}
return arr, nil
} else {
return nil, errors.New("invalid type, must be number of string")
}
}
func jpfJoin(arguments []interface{}) (interface{}, error) {
sep := arguments[0].(string)
// We can't just do arguments[1].([]string), so we have to
// manually convert each item to a string.
arrayStr := []string{}
for _, item := range arguments[1].([]interface{}) {
arrayStr = append(arrayStr, item.(string))
}
return strings.Join(arrayStr, sep), nil
}
func jpfReverse(arguments []interface{}) (interface{}, error) {
if s, ok := arguments[0].(string); ok {
r := []rune(s)
for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
r[i], r[j] = r[j], r[i]
}
return string(r), nil
}
items := arguments[0].([]interface{})
length := len(items)
reversed := make([]interface{}, length)
for i, item := range items {
reversed[length-(i+1)] = item
}
return reversed, nil
}
func jpfToArray(arguments []interface{}) (interface{}, error) {
if _, ok := arguments[0].([]interface{}); ok {
return arguments[0], nil
}
return arguments[:1:1], nil
}
func jpfToString(arguments []interface{}) (interface{}, error) {
if v, ok := arguments[0].(string); ok {
return v, nil
}
result, err := json.Marshal(arguments[0])
if err != nil {
return nil, err
}
return string(result), nil
}
func jpfToNumber(arguments []interface{}) (interface{}, error) {
arg := arguments[0]
if v, ok := arg.(float64); ok {
return v, nil
}
if v, ok := arg.(string); ok {
conv, err := strconv.ParseFloat(v, 64)
if err != nil {
return nil, nil
}
return conv, nil
}
if _, ok := arg.([]interface{}); ok {
return nil, nil
}
if _, ok := arg.(map[string]interface{}); ok {
return nil, nil
}
if arg == nil {
return nil, nil
}
if arg == true || arg == false {
return nil, nil
}
return nil, errors.New("unknown type")
}
func jpfNotNull(arguments []interface{}) (interface{}, error) {
for _, arg := range arguments {
if arg != nil {
return arg, nil
}
}
return nil, nil
}
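
The table above is the whole dispatch story: CallFunction looks the name up, resolveArgs and typeCheck enforce the argSpec list, and hasExpRef causes the interpreter to be prepended to the arguments for the &expression builtins. A minimal sketch of how that surfaces to callers, assuming the package's exported Search helper (which is not part of this diff) and using made-up sample data:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Illustrative data only.
	var data interface{}
	if err := json.Unmarshal([]byte(`{"people": [{"age": 30}, {"age": 25}]}`), &data); err != nil {
		panic(err)
	}
	// sort_by has hasExpRef set, so the interpreter is passed along with the
	// expression reference (&age) when jpfSortBy runs.
	result, err := jmespath.Search("sort_by(people, &age)[0].age", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 25
}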

418
vendor/github.com/jmespath/go-jmespath/interpreter.go

@@ -0,0 +1,418 @@
package jmespath
import (
"errors"
"reflect"
"unicode"
"unicode/utf8"
)
/* This is a tree based interpreter. It walks the AST and directly
interprets the AST to search through a JSON document.
*/
type treeInterpreter struct {
fCall *functionCaller
}
func newInterpreter() *treeInterpreter {
interpreter := treeInterpreter{}
interpreter.fCall = newFunctionCaller()
return &interpreter
}
type expRef struct {
ref ASTNode
}
// Execute takes an ASTNode and input data and interprets the AST directly.
// It will produce the result of applying the JMESPath expression associated
// with the ASTNode to the input data "value".
func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
switch node.nodeType {
case ASTComparator:
left, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, err
}
right, err := intr.Execute(node.children[1], value)
if err != nil {
return nil, err
}
switch node.value {
case tEQ:
return objsEqual(left, right), nil
case tNE:
return !objsEqual(left, right), nil
}
leftNum, ok := left.(float64)
if !ok {
return nil, nil
}
rightNum, ok := right.(float64)
if !ok {
return nil, nil
}
switch node.value {
case tGT:
return leftNum > rightNum, nil
case tGTE:
return leftNum >= rightNum, nil
case tLT:
return leftNum < rightNum, nil
case tLTE:
return leftNum <= rightNum, nil
}
case ASTExpRef:
return expRef{ref: node.children[0]}, nil
case ASTFunctionExpression:
resolvedArgs := []interface{}{}
for _, arg := range node.children {
current, err := intr.Execute(arg, value)
if err != nil {
return nil, err
}
resolvedArgs = append(resolvedArgs, current)
}
return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
case ASTField:
if m, ok := value.(map[string]interface{}); ok {
key := node.value.(string)
return m[key], nil
}
return intr.fieldFromStruct(node.value.(string), value)
case ASTFilterProjection:
left, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, nil
}
sliceType, ok := left.([]interface{})
if !ok {
if isSliceType(left) {
return intr.filterProjectionWithReflection(node, left)
}
return nil, nil
}
compareNode := node.children[2]
collected := []interface{}{}
for _, element := range sliceType {
result, err := intr.Execute(compareNode, element)
if err != nil {
return nil, err
}
if !isFalse(result) {
current, err := intr.Execute(node.children[1], element)
if err != nil {
return nil, err
}
if current != nil {
collected = append(collected, current)
}
}
}
return collected, nil
case ASTFlatten:
left, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, nil
}
sliceType, ok := left.([]interface{})
if !ok {
// If we can't type convert to []interface{}, there's
// a chance this could still work via reflection if we're
// dealing with user provided types.
if isSliceType(left) {
return intr.flattenWithReflection(left)
}
return nil, nil
}
flattened := []interface{}{}
for _, element := range sliceType {
if elementSlice, ok := element.([]interface{}); ok {
flattened = append(flattened, elementSlice...)
} else if isSliceType(element) {
reflectFlat := []interface{}{}
v := reflect.ValueOf(element)
for i := 0; i < v.Len(); i++ {
reflectFlat = append(reflectFlat, v.Index(i).Interface())
}
flattened = append(flattened, reflectFlat...)
} else {
flattened = append(flattened, element)
}
}
return flattened, nil
case ASTIdentity, ASTCurrentNode:
return value, nil
case ASTIndex:
if sliceType, ok := value.([]interface{}); ok {
index := node.value.(int)
if index < 0 {
index += len(sliceType)
}
if index < len(sliceType) && index >= 0 {
return sliceType[index], nil
}
return nil, nil
}
// Otherwise try via reflection.
rv := reflect.ValueOf(value)
if rv.Kind() == reflect.Slice {
index := node.value.(int)
if index < 0 {
index += rv.Len()
}
if index < rv.Len() && index >= 0 {
v := rv.Index(index)
return v.Interface(), nil
}
}
return nil, nil
case ASTKeyValPair:
return intr.Execute(node.children[0], value)
case ASTLiteral:
return node.value, nil
case ASTMultiSelectHash:
if value == nil {
return nil, nil
}
collected := make(map[string]interface{})
for _, child := range node.children {
current, err := intr.Execute(child, value)
if err != nil {
return nil, err
}
key := child.value.(string)
collected[key] = current
}
return collected, nil
case ASTMultiSelectList:
if value == nil {
return nil, nil
}
collected := []interface{}{}
for _, child := range node.children {
current, err := intr.Execute(child, value)
if err != nil {
return nil, err
}
collected = append(collected, current)
}
return collected, nil
case ASTOrExpression:
matched, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, err
}
if isFalse(matched) {
matched, err = intr.Execute(node.children[1], value)
if err != nil {
return nil, err
}
}
return matched, nil
case ASTAndExpression:
matched, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, err
}
if isFalse(matched) {
return matched, nil
}
return intr.Execute(node.children[1], value)
case ASTNotExpression:
matched, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, err
}
if isFalse(matched) {
return true, nil
}
return false, nil
case ASTPipe:
result := value
var err error
for _, child := range node.children {
result, err = intr.Execute(child, result)
if err != nil {
return nil, err
}
}
return result, nil
case ASTProjection:
left, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, err
}
sliceType, ok := left.([]interface{})
if !ok {
if isSliceType(left) {
return intr.projectWithReflection(node, left)
}
return nil, nil
}
collected := []interface{}{}
var current interface{}
for _, element := range sliceType {
current, err = intr.Execute(node.children[1], element)
if err != nil {
return nil, err
}
if current != nil {
collected = append(collected, current)
}
}
return collected, nil
case ASTSubexpression, ASTIndexExpression:
left, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, err
}
return intr.Execute(node.children[1], left)
case ASTSlice:
sliceType, ok := value.([]interface{})
if !ok {
if isSliceType(value) {
return intr.sliceWithReflection(node, value)
}
return nil, nil
}
parts := node.value.([]*int)
sliceParams := make([]sliceParam, 3)
for i, part := range parts {
if part != nil {
sliceParams[i].Specified = true
sliceParams[i].N = *part
}
}
return slice(sliceType, sliceParams)
case ASTValueProjection:
left, err := intr.Execute(node.children[0], value)
if err != nil {
return nil, nil
}
mapType, ok := left.(map[string]interface{})
if !ok {
return nil, nil
}
values := make([]interface{}, 0, len(mapType))
for _, value := range mapType {
values = append(values, value)
}
collected := []interface{}{}
for _, element := range values {
current, err := intr.Execute(node.children[1], element)
if err != nil {
return nil, err
}
if current != nil {
collected = append(collected, current)
}
}
return collected, nil
}
return nil, errors.New("Unknown AST node: " + node.nodeType.String())
}
func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
rv := reflect.ValueOf(value)
first, n := utf8.DecodeRuneInString(key)
fieldName := string(unicode.ToUpper(first)) + key[n:]
if rv.Kind() == reflect.Struct {
v := rv.FieldByName(fieldName)
if !v.IsValid() {
return nil, nil
}
return v.Interface(), nil
} else if rv.Kind() == reflect.Ptr {
// Handle multiple levels of indirection?
if rv.IsNil() {
return nil, nil
}
rv = rv.Elem()
v := rv.FieldByName(fieldName)
if !v.IsValid() {
return nil, nil
}
return v.Interface(), nil
}
return nil, nil
}
func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
v := reflect.ValueOf(value)
flattened := []interface{}{}
for i := 0; i < v.Len(); i++ {
element := v.Index(i).Interface()
if reflect.TypeOf(element).Kind() == reflect.Slice {
// Then insert the contents of the element
// slice into the flattened slice,
// i.e flattened = append(flattened, mySlice...)
elementV := reflect.ValueOf(element)
for j := 0; j < elementV.Len(); j++ {
flattened = append(
flattened, elementV.Index(j).Interface())
}
} else {
flattened = append(flattened, element)
}
}
return flattened, nil
}
func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
v := reflect.ValueOf(value)
parts := node.value.([]*int)
sliceParams := make([]sliceParam, 3)
for i, part := range parts {
if part != nil {
sliceParams[i].Specified = true
sliceParams[i].N = *part
}
}
final := []interface{}{}
for i := 0; i < v.Len(); i++ {
element := v.Index(i).Interface()
final = append(final, element)
}
return slice(final, sliceParams)
}
func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
compareNode := node.children[2]
collected := []interface{}{}
v := reflect.ValueOf(value)
for i := 0; i < v.Len(); i++ {
element := v.Index(i).Interface()
result, err := intr.Execute(compareNode, element)
if err != nil {
return nil, err
}
if !isFalse(result) {
current, err := intr.Execute(node.children[1], element)
if err != nil {
return nil, err
}
if current != nil {
collected = append(collected, current)
}
}
}
return collected, nil
}
func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
collected := []interface{}{}
v := reflect.ValueOf(value)
for i := 0; i < v.Len(); i++ {
element := v.Index(i).Interface()
result, err := intr.Execute(node.children[1], element)
if err != nil {
return nil, err
}
if result != nil {
collected = append(collected, result)
}
}
return collected, nil
}
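
Execute's ASTField case only takes the map fast path; anything else falls through to fieldFromStruct, which upper-cases the first rune of the key and resolves it by reflection, and the *WithReflection helpers do the same for slices that are not []interface{}. A hedged sketch of that behaviour through the public API; the Person type and values are illustrative, and Search is assumed from the package's exported surface rather than shown in this diff:

package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

// Person is an illustrative struct; any struct with exported fields behaves
// the same way, because fieldFromStruct maps "name" onto the exported "Name".
type Person struct {
	Name    string
	Friends []Person
}

func main() {
	p := Person{Name: "Ada", Friends: []Person{{Name: "Grace"}}}
	// ASTField -> fieldFromStruct, then projectWithReflection over []Person.
	names, err := jmespath.Search("friends[*].name", p)
	fmt.Println(names, err) // [Grace] <nil>
}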

420
vendor/github.com/jmespath/go-jmespath/lexer.go

@@ -0,0 +1,420 @@
package jmespath
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
type token struct {
tokenType tokType
value string
position int
length int
}
type tokType int
const eof = -1
// Lexer contains information about the expression being tokenized.
type Lexer struct {
expression string // The expression provided by the user.
currentPos int // The current position in the string.
lastWidth int // The width of the current rune.
buf bytes.Buffer // Internal buffer used for building up values.
}
// SyntaxError is the main error used whenever a lexing or parsing error occurs.
type SyntaxError struct {
msg string // Error message displayed to user
Expression string // Expression that generated a SyntaxError
Offset int // The location in the string where the error occurred
}
func (e SyntaxError) Error() string {
// In the future, it would be good to underline the specific
// location where the error occurred.
return "SyntaxError: " + e.msg
}
// HighlightLocation will show where the syntax error occurred.
// It will place a "^" character on a line below the expression
// at the point where the syntax error occurred.
func (e SyntaxError) HighlightLocation() string {
return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
}
//go:generate stringer -type=tokType
const (
tUnknown tokType = iota
tStar
tDot
tFilter
tFlatten
tLparen
tRparen
tLbracket
tRbracket
tLbrace
tRbrace
tOr
tPipe
tNumber
tUnquotedIdentifier
tQuotedIdentifier
tComma
tColon
tLT
tLTE
tGT
tGTE
tEQ
tNE
tJSONLiteral
tStringLiteral
tCurrent
tExpref
tAnd
tNot
tEOF
)
var basicTokens = map[rune]tokType{
'.': tDot,
'*': tStar,
',': tComma,
':': tColon,
'{': tLbrace,
'}': tRbrace,
']': tRbracket, // tLbracket not included because it could be "[]"
'(': tLparen,
')': tRparen,
'@': tCurrent,
}
// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
// When using this bitmask just be sure to shift the rune down 64 bits
// before checking against identifierStartBits.
const identifierStartBits uint64 = 576460745995190270
// Bit mask for [0-9a-zA-Z_], 128 bits -> 2 uint64s.
var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
var whiteSpace = map[rune]bool{
' ': true, '\t': true, '\n': true, '\r': true,
}
func (t token) String() string {
return fmt.Sprintf("Token{%+v, %s, %d, %d}",
t.tokenType, t.value, t.position, t.length)
}
// NewLexer creates a new JMESPath lexer.
func NewLexer() *Lexer {
lexer := Lexer{}
return &lexer
}
func (lexer *Lexer) next() rune {
if lexer.currentPos >= len(lexer.expression) {
lexer.lastWidth = 0
return eof
}
r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
lexer.lastWidth = w
lexer.currentPos += w
return r
}
func (lexer *Lexer) back() {
lexer.currentPos -= lexer.lastWidth
}
func (lexer *Lexer) peek() rune {
t := lexer.next()
lexer.back()
return t
}
// tokenize takes an expression and returns corresponding tokens.
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
var tokens []token
lexer.expression = expression
lexer.currentPos = 0
lexer.lastWidth = 0
loop:
for {
r := lexer.next()
if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
t := lexer.consumeUnquotedIdentifier()
tokens = append(tokens, t)
} else if val, ok := basicTokens[r]; ok {
// Basic single char token.
t := token{
tokenType: val,
value: string(r),
position: lexer.currentPos - lexer.lastWidth,
length: 1,
}
tokens = append(tokens, t)
} else if r == '-' || (r >= '0' && r <= '9') {
t := lexer.consumeNumber()
tokens = append(tokens, t)
} else if r == '[' {
t := lexer.consumeLBracket()
tokens = append(tokens, t)
} else if r == '"' {
t, err := lexer.consumeQuotedIdentifier()
if err != nil {
return tokens, err
}
tokens = append(tokens, t)
} else if r == '\'' {
t, err := lexer.consumeRawStringLiteral()
if err != nil {
return tokens, err
}
tokens = append(tokens, t)
} else if r == '`' {
t, err := lexer.consumeLiteral()
if err != nil {
return tokens, err
}
tokens = append(tokens, t)
} else if r == '|' {
t := lexer.matchOrElse(r, '|', tOr, tPipe)
tokens = append(tokens, t)
} else if r == '<' {
t := lexer.matchOrElse(r, '=', tLTE, tLT)
tokens = append(tokens, t)
} else if r == '>' {
t := lexer.matchOrElse(r, '=', tGTE, tGT)
tokens = append(tokens, t)
} else if r == '!' {
t := lexer.matchOrElse(r, '=', tNE, tNot)
tokens = append(tokens, t)
} else if r == '=' {
t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
tokens = append(tokens, t)
} else if r == '&' {
t := lexer.matchOrElse(r, '&', tAnd, tExpref)
tokens = append(tokens, t)
} else if r == eof {
break loop
} else if _, ok := whiteSpace[r]; ok {
// Ignore whitespace
} else {
return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
}
}
tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
return tokens, nil
}
// Consume characters until the ending rune "end" is reached.
// If the end of the expression is reached before seeing the
// terminating rune "end", then an error is returned.
// If no error occurs then the matching substring is returned.
// The returned string will not include the ending rune.
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
start := lexer.currentPos
current := lexer.next()
for current != end && current != eof {
if current == '\\' && lexer.peek() != eof {
lexer.next()
}
current = lexer.next()
}
if lexer.lastWidth == 0 {
// Then we hit an EOF so we never reached the closing
// delimiter.
return "", SyntaxError{
msg: "Unclosed delimiter: " + string(end),
Expression: lexer.expression,
Offset: len(lexer.expression),
}
}
return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
}
func (lexer *Lexer) consumeLiteral() (token, error) {
start := lexer.currentPos
value, err := lexer.consumeUntil('`')
if err != nil {
return token{}, err
}
value = strings.Replace(value, "\\`", "`", -1)
return token{
tokenType: tJSONLiteral,
value: value,
position: start,
length: len(value),
}, nil
}
func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
start := lexer.currentPos
currentIndex := start
current := lexer.next()
for current != '\'' && lexer.peek() != eof {
if current == '\\' && lexer.peek() == '\'' {
chunk := lexer.expression[currentIndex : lexer.currentPos-1]
lexer.buf.WriteString(chunk)
lexer.buf.WriteString("'")
lexer.next()
currentIndex = lexer.currentPos
}
current = lexer.next()
}
if lexer.lastWidth == 0 {
// Then we hit an EOF so we never reached the closing
// delimiter.
return token{}, SyntaxError{
msg: "Unclosed delimiter: '",
Expression: lexer.expression,
Offset: len(lexer.expression),
}
}
if currentIndex < lexer.currentPos {
lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
}
value := lexer.buf.String()
// Reset the buffer so it can be reused.
lexer.buf.Reset()
return token{
tokenType: tStringLiteral,
value: value,
position: start,
length: len(value),
}, nil
}
func (lexer *Lexer) syntaxError(msg string) SyntaxError {
return SyntaxError{
msg: msg,
Expression: lexer.expression,
Offset: lexer.currentPos - 1,
}
}
// Checks for a two char token, otherwise matches a single character
// token. This is used whenever a two char token overlaps a single
// char token, e.g. "||" -> tPipe, "|" -> tOr.
func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
start := lexer.currentPos - lexer.lastWidth
nextRune := lexer.next()
var t token
if nextRune == second {
t = token{
tokenType: matchedType,
value: string(first) + string(second),
position: start,
length: 2,
}
} else {
lexer.back()
t = token{
tokenType: singleCharType,
value: string(first),
position: start,
length: 1,
}
}
return t
}
func (lexer *Lexer) consumeLBracket() token {
// There are three options here:
// 1. A filter expression "[?"
// 2. A flatten operator "[]"
// 3. A bare lbracket "["
start := lexer.currentPos - lexer.lastWidth
nextRune := lexer.next()
var t token
if nextRune == '?' {
t = token{
tokenType: tFilter,
value: "[?",
position: start,
length: 2,
}
} else if nextRune == ']' {
t = token{
tokenType: tFlatten,
value: "[]",
position: start,
length: 2,
}
} else {
t = token{
tokenType: tLbracket,
value: "[",
position: start,
length: 1,
}
lexer.back()
}
return t
}
func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
start := lexer.currentPos
value, err := lexer.consumeUntil('"')
if err != nil {
return token{}, err
}
var decoded string
asJSON := []byte("\"" + value + "\"")
if err := json.Unmarshal(asJSON, &decoded); err != nil {
return token{}, err
}
return token{
tokenType: tQuotedIdentifier,
value: decoded,
position: start - 1,
length: len(decoded),
}, nil
}
func (lexer *Lexer) consumeUnquotedIdentifier() token {
// Consume runes until we reach the end of an unquoted
// identifier.
start := lexer.currentPos - lexer.lastWidth
for {
r := lexer.next()
if r < 0 || r >= 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
lexer.back()
break
}
}
value := lexer.expression[start:lexer.currentPos]
return token{
tokenType: tUnquotedIdentifier,
value: value,
position: start,
length: lexer.currentPos - start,
}
}
func (lexer *Lexer) consumeNumber() token {
// Consume runes until we reach something that's not a number.
start := lexer.currentPos - lexer.lastWidth
for {
r := lexer.next()
if r < '0' || r > '9' {
lexer.back()
break
}
}
value := lexer.expression[start:lexer.currentPos]
return token{
tokenType: tNumber,
value: value,
position: start,
length: lexer.currentPos - start,
}
}
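
Any rune that is not in basicTokens, whiteSpace, or the identifier bit masks makes tokenize return the SyntaxError defined above, and HighlightLocation draws a caret under the reported offset. A small sketch, assuming the exported Compile helper (not part of this diff) returns the lexer's SyntaxError unwrapped; the expression is made up:

package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// '#' is not in basicTokens, whiteSpace, or the identifier bit masks,
	// so the lexer reports "Unknown char" at that offset.
	_, err := jmespath.Compile("foo.#bar")
	if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
		fmt.Println(syntaxErr.Error())             // SyntaxError: Unknown char: '#'
		fmt.Println(syntaxErr.HighlightLocation()) // prints the expression with a caret under '#'
	}
}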

603
vendor/github.com/jmespath/go-jmespath/parser.go

@@ -0,0 +1,603 @@
package jmespath
import (
"encoding/json"
"fmt"
"strconv"
"strings"
)
type astNodeType int
//go:generate stringer -type astNodeType
const (
ASTEmpty astNodeType = iota
ASTComparator
ASTCurrentNode
ASTExpRef
ASTFunctionExpression
ASTField
ASTFilterProjection
ASTFlatten
ASTIdentity
ASTIndex
ASTIndexExpression
ASTKeyValPair
ASTLiteral
ASTMultiSelectHash
ASTMultiSelectList
ASTOrExpression
ASTAndExpression
ASTNotExpression
ASTPipe
ASTProjection
ASTSubexpression
ASTSlice
ASTValueProjection
)
// ASTNode represents the abstract syntax tree of a JMESPath expression.
type ASTNode struct {
nodeType astNodeType
value interface{}
children []ASTNode
}
func (node ASTNode) String() string {
return node.PrettyPrint(0)
}
// PrettyPrint will pretty print the parsed AST.
// The AST is an implementation detail and this pretty print
// function is provided as a convenience method to help with
// debugging. You should not rely on its output as the internal
// structure of the AST may change at any time.
func (node ASTNode) PrettyPrint(indent int) string {
spaces := strings.Repeat(" ", indent)
output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
nextIndent := indent + 2
if node.value != nil {
if converted, ok := node.value.(fmt.Stringer); ok {
// Account for things like comparator nodes
// that are enums with a String() method.
output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
} else {
output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
}
}
lastIndex := len(node.children)
if lastIndex > 0 {
output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
childIndent := nextIndent + 2
for _, elem := range node.children {
output += elem.PrettyPrint(childIndent)
}
}
output += fmt.Sprintf("%s}\n", spaces)
return output
}
var bindingPowers = map[tokType]int{
tEOF: 0,
tUnquotedIdentifier: 0,
tQuotedIdentifier: 0,
tRbracket: 0,
tRparen: 0,
tComma: 0,
tRbrace: 0,
tNumber: 0,
tCurrent: 0,
tExpref: 0,
tColon: 0,
tPipe: 1,
tOr: 2,
tAnd: 3,
tEQ: 5,
tLT: 5,
tLTE: 5,
tGT: 5,
tGTE: 5,
tNE: 5,
tFlatten: 9,
tStar: 20,
tFilter: 21,
tDot: 40,
tNot: 45,
tLbrace: 50,
tLbracket: 55,
tLparen: 60,
}
// Parser holds state about the current expression being parsed.
type Parser struct {
expression string
tokens []token
index int
}
// NewParser creates a new JMESPath parser.
func NewParser() *Parser {
p := Parser{}
return &p
}
// Parse will compile a JMESPath expression.
func (p *Parser) Parse(expression string) (ASTNode, error) {
lexer := NewLexer()
p.expression = expression
p.index = 0
tokens, err := lexer.tokenize(expression)
if err != nil {
return ASTNode{}, err
}
p.tokens = tokens
parsed, err := p.parseExpression(0)
if err != nil {
return ASTNode{}, err
}
if p.current() != tEOF {
return ASTNode{}, p.syntaxError(fmt.Sprintf(
"Unexpected token at the end of the expresssion: %s", p.current()))
}
return parsed, nil
}
func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
var err error
leftToken := p.lookaheadToken(0)
p.advance()
leftNode, err := p.nud(leftToken)
if err != nil {
return ASTNode{}, err
}
currentToken := p.current()
for bindingPower < bindingPowers[currentToken] {
p.advance()
leftNode, err = p.led(currentToken, leftNode)
if err != nil {
return ASTNode{}, err
}
currentToken = p.current()
}
return leftNode, nil
}
func (p *Parser) parseIndexExpression() (ASTNode, error) {
if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
return p.parseSliceExpression()
}
indexStr := p.lookaheadToken(0).value
parsedInt, err := strconv.Atoi(indexStr)
if err != nil {
return ASTNode{}, err
}
indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
p.advance()
if err := p.match(tRbracket); err != nil {
return ASTNode{}, err
}
return indexNode, nil
}
func (p *Parser) parseSliceExpression() (ASTNode, error) {
parts := []*int{nil, nil, nil}
index := 0
current := p.current()
for current != tRbracket && index < 3 {
if current == tColon {
index++
p.advance()
} else if current == tNumber {
parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
if err != nil {
return ASTNode{}, err
}
parts[index] = &parsedInt
p.advance()
} else {
return ASTNode{}, p.syntaxError(
"Expected tColon or tNumber" + ", received: " + p.current().String())
}
current = p.current()
}
if err := p.match(tRbracket); err != nil {
return ASTNode{}, err
}
return ASTNode{
nodeType: ASTSlice,
value: parts,
}, nil
}
func (p *Parser) match(tokenType tokType) error {
if p.current() == tokenType {
p.advance()
return nil
}
return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
}
func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
switch tokenType {
case tDot:
if p.current() != tStar {
right, err := p.parseDotRHS(bindingPowers[tDot])
return ASTNode{
nodeType: ASTSubexpression,
children: []ASTNode{node, right},
}, err
}
p.advance()
right, err := p.parseProjectionRHS(bindingPowers[tDot])
return ASTNode{
nodeType: ASTValueProjection,
children: []ASTNode{node, right},
}, err
case tPipe:
right, err := p.parseExpression(bindingPowers[tPipe])
return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
case tOr:
right, err := p.parseExpression(bindingPowers[tOr])
return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
case tAnd:
right, err := p.parseExpression(bindingPowers[tAnd])
return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
case tLparen:
name := node.value
var args []ASTNode
for p.current() != tRparen {
expression, err := p.parseExpression(0)
if err != nil {
return ASTNode{}, err
}
if p.current() == tComma {
if err := p.match(tComma); err != nil {
return ASTNode{}, err
}
}
args = append(args, expression)
}
if err := p.match(tRparen); err != nil {
return ASTNode{}, err
}
return ASTNode{
nodeType: ASTFunctionExpression,
value: name,
children: args,
}, nil
case tFilter:
return p.parseFilter(node)
case tFlatten:
left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
return ASTNode{
nodeType: ASTProjection,
children: []ASTNode{left, right},
}, err
case tEQ, tNE, tGT, tGTE, tLT, tLTE:
right, err := p.parseExpression(bindingPowers[tokenType])
if err != nil {
return ASTNode{}, err
}
return ASTNode{
nodeType: ASTComparator,
value: tokenType,
children: []ASTNode{node, right},
}, nil
case tLbracket:
tokenType := p.current()
var right ASTNode
var err error
if tokenType == tNumber || tokenType == tColon {
right, err = p.parseIndexExpression()
if err != nil {
return ASTNode{}, err
}
return p.projectIfSlice(node, right)
}
// Otherwise this is a projection.
if err := p.match(tStar); err != nil {
return ASTNode{}, err
}
if err := p.match(tRbracket); err != nil {
return ASTNode{}, err
}
right, err = p.parseProjectionRHS(bindingPowers[tStar])
if err != nil {
return ASTNode{}, err
}
return ASTNode{
nodeType: ASTProjection,
children: []ASTNode{node, right},
}, nil
}
return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
}
func (p *Parser) nud(token token) (ASTNode, error) {
switch token.tokenType {
case tJSONLiteral:
var parsed interface{}
err := json.Unmarshal([]byte(token.value), &parsed)
if err != nil {
return ASTNode{}, err
}
return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
case tStringLiteral:
return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
case tUnquotedIdentifier:
return ASTNode{
nodeType: ASTField,
value: token.value,
}, nil
case tQuotedIdentifier:
node := ASTNode{nodeType: ASTField, value: token.value}
if p.current() == tLparen {
return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
}
return node, nil
case tStar:
left := ASTNode{nodeType: ASTIdentity}
var right ASTNode
var err error
if p.current() == tRbracket {
right = ASTNode{nodeType: ASTIdentity}
} else {
right, err = p.parseProjectionRHS(bindingPowers[tStar])
}
return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
case tFilter:
return p.parseFilter(ASTNode{nodeType: ASTIdentity})
case tLbrace:
return p.parseMultiSelectHash()
case tFlatten:
left := ASTNode{
nodeType: ASTFlatten,
children: []ASTNode{{nodeType: ASTIdentity}},
}
right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
if err != nil {
return ASTNode{}, err
}
return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
case tLbracket:
tokenType := p.current()
if tokenType == tNumber || tokenType == tColon {
right, err := p.parseIndexExpression()
if err != nil {
return ASTNode{}, err
}
return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
} else if tokenType == tStar && p.lookahead(1) == tRbracket {
p.advance()
p.advance()
right, err := p.parseProjectionRHS(bindingPowers[tStar])
if err != nil {
return ASTNode{}, err
}
return ASTNode{
nodeType: ASTProjection,
children: []ASTNode{{nodeType: ASTIdentity}, right},
}, nil
} else {
return p.parseMultiSelectList()
}
case tCurrent:
return ASTNode{nodeType: ASTCurrentNode}, nil
case tExpref:
expression, err := p.parseExpression(bindingPowers[tExpref])
if err != nil {
return ASTNode{}, err
}
return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
case tNot:
expression, err := p.parseExpression(bindingPowers[tNot])
if err != nil {
return ASTNode{}, err
}
return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
case tLparen:
expression, err := p.parseExpression(0)
if err != nil {
return ASTNode{}, err
}
if err := p.match(tRparen); err != nil {
return ASTNode{}, err
}
return expression, nil
case tEOF:
return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
}
return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
}
func (p *Parser) parseMultiSelectList() (ASTNode, error) {
var expressions []ASTNode
for {
expression, err := p.parseExpression(0)
if err != nil {
return ASTNode{}, err
}
expressions = append(expressions, expression)
if p.current() == tRbracket {
break
}
err = p.match(tComma)
if err != nil {
return ASTNode{}, err
}
}
err := p.match(tRbracket)
if err != nil {
return ASTNode{}, err
}
return ASTNode{
nodeType: ASTMultiSelectList,
children: expressions,
}, nil
}
func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
var children []ASTNode
for {
keyToken := p.lookaheadToken(0)
if err := p.match(tUnquotedIdentifier); err != nil {
if err := p.match(tQuotedIdentifier); err != nil {
return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
}
}
keyName := keyToken.value
err := p.match(tColon)
if err != nil {
return ASTNode{}, err
}
value, err := p.parseExpression(0)
if err != nil {
return ASTNode{}, err
}
node := ASTNode{
nodeType: ASTKeyValPair,
value: keyName,
children: []ASTNode{value},
}
children = append(children, node)
if p.current() == tComma {
err := p.match(tComma)
if err != nil {
return ASTNode{}, err
}
} else if p.current() == tRbrace {
err := p.match(tRbrace)
if err != nil {
return ASTNode{}, err
}
break
}
}
return ASTNode{
nodeType: ASTMultiSelectHash,
children: children,
}, nil
}
func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
indexExpr := ASTNode{
nodeType: ASTIndexExpression,
children: []ASTNode{left, right},
}
if right.nodeType == ASTSlice {
right, err := p.parseProjectionRHS(bindingPowers[tStar])
return ASTNode{
nodeType: ASTProjection,
children: []ASTNode{indexExpr, right},
}, err
}
return indexExpr, nil
}
func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
var right, condition ASTNode
var err error
condition, err = p.parseExpression(0)
if err != nil {
return ASTNode{}, err
}
if err := p.match(tRbracket); err != nil {
return ASTNode{}, err
}
if p.current() == tFlatten {
right = ASTNode{nodeType: ASTIdentity}
} else {
right, err = p.parseProjectionRHS(bindingPowers[tFilter])
if err != nil {
return ASTNode{}, err
}
}
return ASTNode{
nodeType: ASTFilterProjection,
children: []ASTNode{node, right, condition},
}, nil
}
func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
lookahead := p.current()
if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
return p.parseExpression(bindingPower)
} else if lookahead == tLbracket {
if err := p.match(tLbracket); err != nil {
return ASTNode{}, err
}
return p.parseMultiSelectList()
} else if lookahead == tLbrace {
if err := p.match(tLbrace); err != nil {
return ASTNode{}, err
}
return p.parseMultiSelectHash()
}
return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
}
func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
current := p.current()
if bindingPowers[current] < 10 {
return ASTNode{nodeType: ASTIdentity}, nil
} else if current == tLbracket {
return p.parseExpression(bindingPower)
} else if current == tFilter {
return p.parseExpression(bindingPower)
} else if current == tDot {
err := p.match(tDot)
if err != nil {
return ASTNode{}, err
}
return p.parseDotRHS(bindingPower)
} else {
return ASTNode{}, p.syntaxError("Error")
}
}
func (p *Parser) lookahead(number int) tokType {
return p.lookaheadToken(number).tokenType
}
func (p *Parser) current() tokType {
return p.lookahead(0)
}
func (p *Parser) lookaheadToken(number int) token {
return p.tokens[p.index+number]
}
func (p *Parser) advance() {
p.index++
}
func tokensOneOf(elements []tokType, token tokType) bool {
for _, elem := range elements {
if elem == token {
return true
}
}
return false
}
func (p *Parser) syntaxError(msg string) SyntaxError {
return SyntaxError{
msg: msg,
Expression: p.expression,
Offset: p.lookaheadToken(0).position,
}
}
// Create a SyntaxError based on the provided token.
// This differs from syntaxError() which creates a SyntaxError
// based on the current lookahead token.
func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
return SyntaxError{
msg: msg,
Expression: p.expression,
Offset: t.position,
}
}
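
bindingPowers is what makes this a Pratt parser: parseExpression keeps folding tokens into the left node while the next token's binding power exceeds the caller's, so tAnd (3) groups before tOr (2) and tPipe (1) binds loosest of the operators. NewParser, Parse, and ASTNode's String/PrettyPrint are all exported above, so a quick sketch can show the resulting shape; the expression itself is just an illustration:

package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	parser := jmespath.NewParser()
	// Because tAnd outranks tOr and tPipe ranks lowest, this parses as
	// (a || (b && c)) | d.
	ast, err := parser.Parse("a || b && c | d")
	if err != nil {
		fmt.Println(err)
		return
	}
	// PrettyPrint output: ASTPipe at the root, ASTOrExpression on its left.
	fmt.Println(ast)
}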

16
vendor/github.com/jmespath/go-jmespath/toktype_string.go

@@ -0,0 +1,16 @@
// generated by stringer -type=tokType; DO NOT EDIT
package jmespath
import "fmt"
const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
func (i tokType) String() string {
if i < 0 || i >= tokType(len(_tokType_index)-1) {
return fmt.Sprintf("tokType(%d)", i)
}
return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
}

185
vendor/github.com/jmespath/go-jmespath/util.go

@ -0,0 +1,185 @@
package jmespath
import (
"errors"
"reflect"
)
// IsFalse determines if an object is false based on the JMESPath spec.
// JMESPath defines false values to be any of:
// - An empty string, array, or hash.
// - The boolean value false.
// - nil
func isFalse(value interface{}) bool {
switch v := value.(type) {
case bool:
return !v
case []interface{}:
return len(v) == 0
case map[string]interface{}:
return len(v) == 0
case string:
return len(v) == 0
case nil:
return true
}
// Try the reflection cases before returning false.
rv := reflect.ValueOf(value)
switch rv.Kind() {
case reflect.Struct:
// A struct type will never be false, even if
// all of its values are the zero type.
return false
case reflect.Slice, reflect.Map:
return rv.Len() == 0
case reflect.Ptr:
if rv.IsNil() {
return true
}
// If it's a pointer type, we'll try to deref the pointer
// and evaluate the pointer value for isFalse.
element := rv.Elem()
return isFalse(element.Interface())
}
return false
}
// ObjsEqual is a generic object equality check.
// It will take two arbitrary objects and recursively determine
// if they are equal.
func objsEqual(left interface{}, right interface{}) bool {
return reflect.DeepEqual(left, right)
}
// SliceParam refers to a single part of a slice.
// A slice consists of a start, a stop, and a step, similar to
// python slices.
type sliceParam struct {
N int
Specified bool
}
// Slice supports [start:stop:step] style slicing that's supported in JMESPath.
func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
computed, err := computeSliceParams(len(slice), parts)
if err != nil {
return nil, err
}
start, stop, step := computed[0], computed[1], computed[2]
result := []interface{}{}
if step > 0 {
for i := start; i < stop; i += step {
result = append(result, slice[i])
}
} else {
for i := start; i > stop; i += step {
result = append(result, slice[i])
}
}
return result, nil
}
func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
var start, stop, step int
if !parts[2].Specified {
step = 1
} else if parts[2].N == 0 {
return nil, errors.New("Invalid slice, step cannot be 0")
} else {
step = parts[2].N
}
var stepValueNegative bool
if step < 0 {
stepValueNegative = true
} else {
stepValueNegative = false
}
if !parts[0].Specified {
if stepValueNegative {
start = length - 1
} else {
start = 0
}
} else {
start = capSlice(length, parts[0].N, step)
}
if !parts[1].Specified {
if stepValueNegative {
stop = -1
} else {
stop = length
}
} else {
stop = capSlice(length, parts[1].N, step)
}
return []int{start, stop, step}, nil
}
func capSlice(length int, actual int, step int) int {
if actual < 0 {
actual += length
if actual < 0 {
if step < 0 {
actual = -1
} else {
actual = 0
}
}
} else if actual >= length {
if step < 0 {
actual = length - 1
} else {
actual = length
}
}
return actual
}
// ToArrayNum converts an empty interface type to a slice of float64.
// If any element in the array cannot be converted, then nil is returned
// along with a second value of false.
func toArrayNum(data interface{}) ([]float64, bool) {
// Is there a better way to do this with reflect?
if d, ok := data.([]interface{}); ok {
result := make([]float64, len(d))
for i, el := range d {
item, ok := el.(float64)
if !ok {
return nil, false
}
result[i] = item
}
return result, true
}
return nil, false
}
// ToArrayStr converts an empty interface type to a slice of strings.
// If any element in the array cannot be converted, then nil is returned
// along with a second value of false. If the input data could be entirely
// converted, then the converted data, along with a second value of true,
// will be returned.
func toArrayStr(data interface{}) ([]string, bool) {
// Is there a better way to do this with reflect?
if d, ok := data.([]interface{}); ok {
result := make([]string, len(d))
for i, el := range d {
item, ok := el.(string)
if !ok {
return nil, false
}
result[i] = item
}
return result, true
}
return nil, false
}
func isSliceType(v interface{}) bool {
if v == nil {
return false
}
return reflect.TypeOf(v).Kind() == reflect.Slice
}